1 /* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/clk-provider.h> 14 #include <linux/clk/clk-conf.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/spinlock.h> 18 #include <linux/err.h> 19 #include <linux/list.h> 20 #include <linux/slab.h> 21 #include <linux/of.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/sched.h> 26 #include <linux/clkdev.h> 27 28 #include "clk.h" 29 30 static DEFINE_SPINLOCK(enable_lock); 31 static DEFINE_MUTEX(prepare_lock); 32 33 static struct task_struct *prepare_owner; 34 static struct task_struct *enable_owner; 35 36 static int prepare_refcnt; 37 static int enable_refcnt; 38 39 static HLIST_HEAD(clk_root_list); 40 static HLIST_HEAD(clk_orphan_list); 41 static LIST_HEAD(clk_notifier_list); 42 43 /*** private data structures ***/ 44 45 struct clk_core { 46 const char *name; 47 const struct clk_ops *ops; 48 struct clk_hw *hw; 49 struct module *owner; 50 struct device *dev; 51 struct clk_core *parent; 52 const char **parent_names; 53 struct clk_core **parents; 54 u8 num_parents; 55 u8 new_parent_index; 56 unsigned long rate; 57 unsigned long req_rate; 58 unsigned long new_rate; 59 struct clk_core *new_parent; 60 struct clk_core *new_child; 61 unsigned long flags; 62 bool orphan; 63 unsigned int enable_count; 64 unsigned int prepare_count; 65 unsigned int protect_count; 66 unsigned long min_rate; 67 unsigned long max_rate; 68 unsigned long accuracy; 69 int phase; 70 struct hlist_head children; 71 struct hlist_node child_node; 72 struct hlist_head clks; 73 unsigned int notifier_count; 74 #ifdef CONFIG_DEBUG_FS 75 struct dentry *dentry; 76 struct hlist_node debug_node; 77 #endif 78 struct kref ref; 79 }; 80 81 #define CREATE_TRACE_POINTS 82 #include <trace/events/clk.h> 83 84 struct clk { 85 struct clk_core *core; 86 const char *dev_id; 87 const char *con_id; 88 unsigned long min_rate; 89 unsigned long max_rate; 90 unsigned int exclusive_count; 91 struct hlist_node clks_node; 92 }; 93 94 /*** runtime pm ***/ 95 static int clk_pm_runtime_get(struct clk_core *core) 96 { 97 int ret = 0; 98 99 if (!core->dev) 100 return 0; 101 102 ret = pm_runtime_get_sync(core->dev); 103 return ret < 0 ? 
ret : 0; 104 } 105 106 static void clk_pm_runtime_put(struct clk_core *core) 107 { 108 if (!core->dev) 109 return; 110 111 pm_runtime_put_sync(core->dev); 112 } 113 114 /*** locking ***/ 115 static void clk_prepare_lock(void) 116 { 117 if (!mutex_trylock(&prepare_lock)) { 118 if (prepare_owner == current) { 119 prepare_refcnt++; 120 return; 121 } 122 mutex_lock(&prepare_lock); 123 } 124 WARN_ON_ONCE(prepare_owner != NULL); 125 WARN_ON_ONCE(prepare_refcnt != 0); 126 prepare_owner = current; 127 prepare_refcnt = 1; 128 } 129 130 static void clk_prepare_unlock(void) 131 { 132 WARN_ON_ONCE(prepare_owner != current); 133 WARN_ON_ONCE(prepare_refcnt == 0); 134 135 if (--prepare_refcnt) 136 return; 137 prepare_owner = NULL; 138 mutex_unlock(&prepare_lock); 139 } 140 141 static unsigned long clk_enable_lock(void) 142 __acquires(enable_lock) 143 { 144 unsigned long flags; 145 146 /* 147 * On UP systems, spin_trylock_irqsave() always returns true, even if 148 * we already hold the lock. So, in that case, we rely only on 149 * reference counting. 150 */ 151 if (!IS_ENABLED(CONFIG_SMP) || 152 !spin_trylock_irqsave(&enable_lock, flags)) { 153 if (enable_owner == current) { 154 enable_refcnt++; 155 __acquire(enable_lock); 156 if (!IS_ENABLED(CONFIG_SMP)) 157 local_save_flags(flags); 158 return flags; 159 } 160 spin_lock_irqsave(&enable_lock, flags); 161 } 162 WARN_ON_ONCE(enable_owner != NULL); 163 WARN_ON_ONCE(enable_refcnt != 0); 164 enable_owner = current; 165 enable_refcnt = 1; 166 return flags; 167 } 168 169 static void clk_enable_unlock(unsigned long flags) 170 __releases(enable_lock) 171 { 172 WARN_ON_ONCE(enable_owner != current); 173 WARN_ON_ONCE(enable_refcnt == 0); 174 175 if (--enable_refcnt) { 176 __release(enable_lock); 177 return; 178 } 179 enable_owner = NULL; 180 spin_unlock_irqrestore(&enable_lock, flags); 181 } 182 183 static bool clk_core_rate_is_protected(struct clk_core *core) 184 { 185 return core->protect_count; 186 } 187 188 static bool clk_core_is_prepared(struct clk_core *core) 189 { 190 bool ret = false; 191 192 /* 193 * .is_prepared is optional for clocks that can prepare 194 * fall back to software usage counter if it is missing 195 */ 196 if (!core->ops->is_prepared) 197 return core->prepare_count; 198 199 if (!clk_pm_runtime_get(core)) { 200 ret = core->ops->is_prepared(core->hw); 201 clk_pm_runtime_put(core); 202 } 203 204 return ret; 205 } 206 207 static bool clk_core_is_enabled(struct clk_core *core) 208 { 209 bool ret = false; 210 211 /* 212 * .is_enabled is only mandatory for clocks that gate 213 * fall back to software usage counter if .is_enabled is missing 214 */ 215 if (!core->ops->is_enabled) 216 return core->enable_count; 217 218 /* 219 * Check if clock controller's device is runtime active before 220 * calling .is_enabled callback. If not, assume that clock is 221 * disabled, because we might be called from atomic context, from 222 * which pm_runtime_get() is not allowed. 223 * This function is called mainly from clk_disable_unused_subtree, 224 * which ensures proper runtime pm activation of controller before 225 * taking enable spinlock, but the below check is needed if one tries 226 * to call it from other places. 
227 */ 228 if (core->dev) { 229 pm_runtime_get_noresume(core->dev); 230 if (!pm_runtime_active(core->dev)) { 231 ret = false; 232 goto done; 233 } 234 } 235 236 ret = core->ops->is_enabled(core->hw); 237 done: 238 if (core->dev) 239 pm_runtime_put(core->dev); 240 241 return ret; 242 } 243 244 /*** helper functions ***/ 245 246 const char *__clk_get_name(const struct clk *clk) 247 { 248 return !clk ? NULL : clk->core->name; 249 } 250 EXPORT_SYMBOL_GPL(__clk_get_name); 251 252 const char *clk_hw_get_name(const struct clk_hw *hw) 253 { 254 return hw->core->name; 255 } 256 EXPORT_SYMBOL_GPL(clk_hw_get_name); 257 258 struct clk_hw *__clk_get_hw(struct clk *clk) 259 { 260 return !clk ? NULL : clk->core->hw; 261 } 262 EXPORT_SYMBOL_GPL(__clk_get_hw); 263 264 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) 265 { 266 return hw->core->num_parents; 267 } 268 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); 269 270 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) 271 { 272 return hw->core->parent ? hw->core->parent->hw : NULL; 273 } 274 EXPORT_SYMBOL_GPL(clk_hw_get_parent); 275 276 static struct clk_core *__clk_lookup_subtree(const char *name, 277 struct clk_core *core) 278 { 279 struct clk_core *child; 280 struct clk_core *ret; 281 282 if (!strcmp(core->name, name)) 283 return core; 284 285 hlist_for_each_entry(child, &core->children, child_node) { 286 ret = __clk_lookup_subtree(name, child); 287 if (ret) 288 return ret; 289 } 290 291 return NULL; 292 } 293 294 static struct clk_core *clk_core_lookup(const char *name) 295 { 296 struct clk_core *root_clk; 297 struct clk_core *ret; 298 299 if (!name) 300 return NULL; 301 302 /* search the 'proper' clk tree first */ 303 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 304 ret = __clk_lookup_subtree(name, root_clk); 305 if (ret) 306 return ret; 307 } 308 309 /* if not found, then search the orphan tree */ 310 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 311 ret = __clk_lookup_subtree(name, root_clk); 312 if (ret) 313 return ret; 314 } 315 316 return NULL; 317 } 318 319 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, 320 u8 index) 321 { 322 if (!core || index >= core->num_parents) 323 return NULL; 324 325 if (!core->parents[index]) 326 core->parents[index] = 327 clk_core_lookup(core->parent_names[index]); 328 329 return core->parents[index]; 330 } 331 332 struct clk_hw * 333 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) 334 { 335 struct clk_core *parent; 336 337 parent = clk_core_get_parent_by_index(hw->core, index); 338 339 return !parent ? NULL : parent->hw; 340 } 341 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); 342 343 unsigned int __clk_get_enable_count(struct clk *clk) 344 { 345 return !clk ? 0 : clk->core->enable_count; 346 } 347 348 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) 349 { 350 unsigned long ret; 351 352 if (!core) { 353 ret = 0; 354 goto out; 355 } 356 357 ret = core->rate; 358 359 if (!core->num_parents) 360 goto out; 361 362 if (!core->parent) 363 ret = 0; 364 365 out: 366 return ret; 367 } 368 369 unsigned long clk_hw_get_rate(const struct clk_hw *hw) 370 { 371 return clk_core_get_rate_nolock(hw->core); 372 } 373 EXPORT_SYMBOL_GPL(clk_hw_get_rate); 374 375 static unsigned long __clk_get_accuracy(struct clk_core *core) 376 { 377 if (!core) 378 return 0; 379 380 return core->accuracy; 381 } 382 383 unsigned long __clk_get_flags(struct clk *clk) 384 { 385 return !clk ? 
0 : clk->core->flags; 386 } 387 EXPORT_SYMBOL_GPL(__clk_get_flags); 388 389 unsigned long clk_hw_get_flags(const struct clk_hw *hw) 390 { 391 return hw->core->flags; 392 } 393 EXPORT_SYMBOL_GPL(clk_hw_get_flags); 394 395 bool clk_hw_is_prepared(const struct clk_hw *hw) 396 { 397 return clk_core_is_prepared(hw->core); 398 } 399 400 bool clk_hw_rate_is_protected(const struct clk_hw *hw) 401 { 402 return clk_core_rate_is_protected(hw->core); 403 } 404 405 bool clk_hw_is_enabled(const struct clk_hw *hw) 406 { 407 return clk_core_is_enabled(hw->core); 408 } 409 410 bool __clk_is_enabled(struct clk *clk) 411 { 412 if (!clk) 413 return false; 414 415 return clk_core_is_enabled(clk->core); 416 } 417 EXPORT_SYMBOL_GPL(__clk_is_enabled); 418 419 static bool mux_is_better_rate(unsigned long rate, unsigned long now, 420 unsigned long best, unsigned long flags) 421 { 422 if (flags & CLK_MUX_ROUND_CLOSEST) 423 return abs(now - rate) < abs(best - rate); 424 425 return now <= rate && now > best; 426 } 427 428 int clk_mux_determine_rate_flags(struct clk_hw *hw, 429 struct clk_rate_request *req, 430 unsigned long flags) 431 { 432 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 433 int i, num_parents, ret; 434 unsigned long best = 0; 435 struct clk_rate_request parent_req = *req; 436 437 /* if NO_REPARENT flag set, pass through to current parent */ 438 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 439 parent = core->parent; 440 if (core->flags & CLK_SET_RATE_PARENT) { 441 ret = __clk_determine_rate(parent ? parent->hw : NULL, 442 &parent_req); 443 if (ret) 444 return ret; 445 446 best = parent_req.rate; 447 } else if (parent) { 448 best = clk_core_get_rate_nolock(parent); 449 } else { 450 best = clk_core_get_rate_nolock(core); 451 } 452 453 goto out; 454 } 455 456 /* find the parent that can provide the fastest rate <= rate */ 457 num_parents = core->num_parents; 458 for (i = 0; i < num_parents; i++) { 459 parent = clk_core_get_parent_by_index(core, i); 460 if (!parent) 461 continue; 462 463 if (core->flags & CLK_SET_RATE_PARENT) { 464 parent_req = *req; 465 ret = __clk_determine_rate(parent->hw, &parent_req); 466 if (ret) 467 continue; 468 } else { 469 parent_req.rate = clk_core_get_rate_nolock(parent); 470 } 471 472 if (mux_is_better_rate(req->rate, parent_req.rate, 473 best, flags)) { 474 best_parent = parent; 475 best = parent_req.rate; 476 } 477 } 478 479 if (!best_parent) 480 return -EINVAL; 481 482 out: 483 if (best_parent) 484 req->best_parent_hw = best_parent->hw; 485 req->best_parent_rate = best; 486 req->rate = best; 487 488 return 0; 489 } 490 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); 491 492 struct clk *__clk_lookup(const char *name) 493 { 494 struct clk_core *core = clk_core_lookup(name); 495 496 return !core ? 
		NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
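/*
 * Illustrative consumer-side sketch (not part of this file) of the balanced
 * clk_rate_exclusive_get()/clk_rate_exclusive_put() pairing documented above.
 * The device, connection id and rate below are made up for the example.
 */
#if 0
static int example_lock_audio_rate(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "mclk");	/* hypothetical con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	/* the holder of exclusivity may still change the rate itself */
	ret = clk_set_rate(clk, 24576000);
	if (ret)
		clk_rate_exclusive_put(clk);	/* keep get/put balanced on error */

	return ret;
}
#endif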
static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C.
 * In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
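 *
 * Illustrative consumer sketch (not part of this file; the clk handle is
 * assumed to come from clk_get()) of the ordering the two halves require:
 *
 *	ret = clk_prepare(clk);		// may sleep
 *	if (!ret)
 *		ret = clk_enable(clk);	// atomic-safe, must follow prepare
 *	...
 *	clk_disable(clk);		// must come first, never sleeps
 *	clk_unprepare(clk);		// may sleep, e.g. an I2C access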
854 */ 855 void clk_disable(struct clk *clk) 856 { 857 if (IS_ERR_OR_NULL(clk)) 858 return; 859 860 clk_core_disable_lock(clk->core); 861 } 862 EXPORT_SYMBOL_GPL(clk_disable); 863 864 static int clk_core_enable(struct clk_core *core) 865 { 866 int ret = 0; 867 868 lockdep_assert_held(&enable_lock); 869 870 if (!core) 871 return 0; 872 873 if (WARN(core->prepare_count == 0, 874 "Enabling unprepared %s\n", core->name)) 875 return -ESHUTDOWN; 876 877 if (core->enable_count == 0) { 878 ret = clk_core_enable(core->parent); 879 880 if (ret) 881 return ret; 882 883 trace_clk_enable_rcuidle(core); 884 885 if (core->ops->enable) 886 ret = core->ops->enable(core->hw); 887 888 trace_clk_enable_complete_rcuidle(core); 889 890 if (ret) { 891 clk_core_disable(core->parent); 892 return ret; 893 } 894 } 895 896 core->enable_count++; 897 return 0; 898 } 899 900 static int clk_core_enable_lock(struct clk_core *core) 901 { 902 unsigned long flags; 903 int ret; 904 905 flags = clk_enable_lock(); 906 ret = clk_core_enable(core); 907 clk_enable_unlock(flags); 908 909 return ret; 910 } 911 912 /** 913 * clk_enable - ungate a clock 914 * @clk: the clk being ungated 915 * 916 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 917 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 918 * if the operation will never sleep. One example is a SoC-internal clk which 919 * is controlled via simple register writes. In the complex case a clk ungate 920 * operation may require a fast and a slow part. It is this reason that 921 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 922 * must be called before clk_enable. Returns 0 on success, -EERROR 923 * otherwise. 924 */ 925 int clk_enable(struct clk *clk) 926 { 927 if (!clk) 928 return 0; 929 930 return clk_core_enable_lock(clk->core); 931 } 932 EXPORT_SYMBOL_GPL(clk_enable); 933 934 static int clk_core_prepare_enable(struct clk_core *core) 935 { 936 int ret; 937 938 ret = clk_core_prepare_lock(core); 939 if (ret) 940 return ret; 941 942 ret = clk_core_enable_lock(core); 943 if (ret) 944 clk_core_unprepare_lock(core); 945 946 return ret; 947 } 948 949 static void clk_core_disable_unprepare(struct clk_core *core) 950 { 951 clk_core_disable_lock(core); 952 clk_core_unprepare_lock(core); 953 } 954 955 static void clk_unprepare_unused_subtree(struct clk_core *core) 956 { 957 struct clk_core *child; 958 959 lockdep_assert_held(&prepare_lock); 960 961 hlist_for_each_entry(child, &core->children, child_node) 962 clk_unprepare_unused_subtree(child); 963 964 if (core->prepare_count) 965 return; 966 967 if (core->flags & CLK_IGNORE_UNUSED) 968 return; 969 970 if (clk_pm_runtime_get(core)) 971 return; 972 973 if (clk_core_is_prepared(core)) { 974 trace_clk_unprepare(core); 975 if (core->ops->unprepare_unused) 976 core->ops->unprepare_unused(core->hw); 977 else if (core->ops->unprepare) 978 core->ops->unprepare(core->hw); 979 trace_clk_unprepare_complete(core); 980 } 981 982 clk_pm_runtime_put(core); 983 } 984 985 static void clk_disable_unused_subtree(struct clk_core *core) 986 { 987 struct clk_core *child; 988 unsigned long flags; 989 990 lockdep_assert_held(&prepare_lock); 991 992 hlist_for_each_entry(child, &core->children, child_node) 993 clk_disable_unused_subtree(child); 994 995 if (core->flags & CLK_OPS_PARENT_ENABLE) 996 clk_core_prepare_enable(core->parent); 997 998 if (clk_pm_runtime_get(core)) 999 goto unprepare_out; 1000 1001 flags = clk_enable_lock(); 1002 1003 if (core->enable_count) 1004 goto 
unlock_out; 1005 1006 if (core->flags & CLK_IGNORE_UNUSED) 1007 goto unlock_out; 1008 1009 /* 1010 * some gate clocks have special needs during the disable-unused 1011 * sequence. call .disable_unused if available, otherwise fall 1012 * back to .disable 1013 */ 1014 if (clk_core_is_enabled(core)) { 1015 trace_clk_disable(core); 1016 if (core->ops->disable_unused) 1017 core->ops->disable_unused(core->hw); 1018 else if (core->ops->disable) 1019 core->ops->disable(core->hw); 1020 trace_clk_disable_complete(core); 1021 } 1022 1023 unlock_out: 1024 clk_enable_unlock(flags); 1025 clk_pm_runtime_put(core); 1026 unprepare_out: 1027 if (core->flags & CLK_OPS_PARENT_ENABLE) 1028 clk_core_disable_unprepare(core->parent); 1029 } 1030 1031 static bool clk_ignore_unused; 1032 static int __init clk_ignore_unused_setup(char *__unused) 1033 { 1034 clk_ignore_unused = true; 1035 return 1; 1036 } 1037 __setup("clk_ignore_unused", clk_ignore_unused_setup); 1038 1039 static int clk_disable_unused(void) 1040 { 1041 struct clk_core *core; 1042 1043 if (clk_ignore_unused) { 1044 pr_warn("clk: Not disabling unused clocks\n"); 1045 return 0; 1046 } 1047 1048 clk_prepare_lock(); 1049 1050 hlist_for_each_entry(core, &clk_root_list, child_node) 1051 clk_disable_unused_subtree(core); 1052 1053 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1054 clk_disable_unused_subtree(core); 1055 1056 hlist_for_each_entry(core, &clk_root_list, child_node) 1057 clk_unprepare_unused_subtree(core); 1058 1059 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1060 clk_unprepare_unused_subtree(core); 1061 1062 clk_prepare_unlock(); 1063 1064 return 0; 1065 } 1066 late_initcall_sync(clk_disable_unused); 1067 1068 static int clk_core_determine_round_nolock(struct clk_core *core, 1069 struct clk_rate_request *req) 1070 { 1071 long rate; 1072 1073 lockdep_assert_held(&prepare_lock); 1074 1075 if (!core) 1076 return 0; 1077 1078 /* 1079 * At this point, core protection will be disabled if 1080 * - if the provider is not protected at all 1081 * - if the calling consumer is the only one which has exclusivity 1082 * over the provider 1083 */ 1084 if (clk_core_rate_is_protected(core)) { 1085 req->rate = core->rate; 1086 } else if (core->ops->determine_rate) { 1087 return core->ops->determine_rate(core->hw, req); 1088 } else if (core->ops->round_rate) { 1089 rate = core->ops->round_rate(core->hw, req->rate, 1090 &req->best_parent_rate); 1091 if (rate < 0) 1092 return rate; 1093 1094 req->rate = rate; 1095 } else { 1096 return -EINVAL; 1097 } 1098 1099 return 0; 1100 } 1101 1102 static void clk_core_init_rate_req(struct clk_core * const core, 1103 struct clk_rate_request *req) 1104 { 1105 struct clk_core *parent; 1106 1107 if (WARN_ON(!core || !req)) 1108 return; 1109 1110 parent = core->parent; 1111 if (parent) { 1112 req->best_parent_hw = parent->hw; 1113 req->best_parent_rate = parent->rate; 1114 } else { 1115 req->best_parent_hw = NULL; 1116 req->best_parent_rate = 0; 1117 } 1118 } 1119 1120 static bool clk_core_can_round(struct clk_core * const core) 1121 { 1122 if (core->ops->determine_rate || core->ops->round_rate) 1123 return true; 1124 1125 return false; 1126 } 1127 1128 static int clk_core_round_rate_nolock(struct clk_core *core, 1129 struct clk_rate_request *req) 1130 { 1131 lockdep_assert_held(&prepare_lock); 1132 1133 if (!core) { 1134 req->rate = 0; 1135 return 0; 1136 } 1137 1138 clk_core_init_rate_req(core, req); 1139 1140 if (clk_core_can_round(core)) 1141 return clk_core_determine_round_nolock(core, req); 1142 else 
if (core->flags & CLK_SET_RATE_PARENT) 1143 return clk_core_round_rate_nolock(core->parent, req); 1144 1145 req->rate = core->rate; 1146 return 0; 1147 } 1148 1149 /** 1150 * __clk_determine_rate - get the closest rate actually supported by a clock 1151 * @hw: determine the rate of this clock 1152 * @req: target rate request 1153 * 1154 * Useful for clk_ops such as .set_rate and .determine_rate. 1155 */ 1156 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 1157 { 1158 if (!hw) { 1159 req->rate = 0; 1160 return 0; 1161 } 1162 1163 return clk_core_round_rate_nolock(hw->core, req); 1164 } 1165 EXPORT_SYMBOL_GPL(__clk_determine_rate); 1166 1167 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) 1168 { 1169 int ret; 1170 struct clk_rate_request req; 1171 1172 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); 1173 req.rate = rate; 1174 1175 ret = clk_core_round_rate_nolock(hw->core, &req); 1176 if (ret) 1177 return 0; 1178 1179 return req.rate; 1180 } 1181 EXPORT_SYMBOL_GPL(clk_hw_round_rate); 1182 1183 /** 1184 * clk_round_rate - round the given rate for a clk 1185 * @clk: the clk for which we are rounding a rate 1186 * @rate: the rate which is to be rounded 1187 * 1188 * Takes in a rate as input and rounds it to a rate that the clk can actually 1189 * use which is then returned. If clk doesn't support round_rate operation 1190 * then the parent rate is returned. 1191 */ 1192 long clk_round_rate(struct clk *clk, unsigned long rate) 1193 { 1194 struct clk_rate_request req; 1195 int ret; 1196 1197 if (!clk) 1198 return 0; 1199 1200 clk_prepare_lock(); 1201 1202 if (clk->exclusive_count) 1203 clk_core_rate_unprotect(clk->core); 1204 1205 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); 1206 req.rate = rate; 1207 1208 ret = clk_core_round_rate_nolock(clk->core, &req); 1209 1210 if (clk->exclusive_count) 1211 clk_core_rate_protect(clk->core); 1212 1213 clk_prepare_unlock(); 1214 1215 if (ret) 1216 return ret; 1217 1218 return req.rate; 1219 } 1220 EXPORT_SYMBOL_GPL(clk_round_rate); 1221 1222 /** 1223 * __clk_notify - call clk notifier chain 1224 * @core: clk that is changing rate 1225 * @msg: clk notifier type (see include/linux/clk.h) 1226 * @old_rate: old clk rate 1227 * @new_rate: new clk rate 1228 * 1229 * Triggers a notifier call chain on the clk rate-change notification 1230 * for 'clk'. Passes a pointer to the struct clk and the previous 1231 * and current rates to the notifier callback. Intended to be called by 1232 * internal clock code only. Returns NOTIFY_DONE from the last driver 1233 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1234 * a driver returns that. 1235 */ 1236 static int __clk_notify(struct clk_core *core, unsigned long msg, 1237 unsigned long old_rate, unsigned long new_rate) 1238 { 1239 struct clk_notifier *cn; 1240 struct clk_notifier_data cnd; 1241 int ret = NOTIFY_DONE; 1242 1243 cnd.old_rate = old_rate; 1244 cnd.new_rate = new_rate; 1245 1246 list_for_each_entry(cn, &clk_notifier_list, node) { 1247 if (cn->clk->core == core) { 1248 cnd.clk = cn->clk; 1249 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1250 &cnd); 1251 if (ret & NOTIFY_STOP_MASK) 1252 return ret; 1253 } 1254 } 1255 1256 return ret; 1257 } 1258 1259 /** 1260 * __clk_recalc_accuracies 1261 * @core: first clk in the subtree 1262 * 1263 * Walks the subtree of clks starting with clk and recalculates accuracies as 1264 * it goes. 
Note that if a clk does not implement the .recalc_accuracy 1265 * callback then it is assumed that the clock will take on the accuracy of its 1266 * parent. 1267 */ 1268 static void __clk_recalc_accuracies(struct clk_core *core) 1269 { 1270 unsigned long parent_accuracy = 0; 1271 struct clk_core *child; 1272 1273 lockdep_assert_held(&prepare_lock); 1274 1275 if (core->parent) 1276 parent_accuracy = core->parent->accuracy; 1277 1278 if (core->ops->recalc_accuracy) 1279 core->accuracy = core->ops->recalc_accuracy(core->hw, 1280 parent_accuracy); 1281 else 1282 core->accuracy = parent_accuracy; 1283 1284 hlist_for_each_entry(child, &core->children, child_node) 1285 __clk_recalc_accuracies(child); 1286 } 1287 1288 static long clk_core_get_accuracy(struct clk_core *core) 1289 { 1290 unsigned long accuracy; 1291 1292 clk_prepare_lock(); 1293 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) 1294 __clk_recalc_accuracies(core); 1295 1296 accuracy = __clk_get_accuracy(core); 1297 clk_prepare_unlock(); 1298 1299 return accuracy; 1300 } 1301 1302 /** 1303 * clk_get_accuracy - return the accuracy of clk 1304 * @clk: the clk whose accuracy is being returned 1305 * 1306 * Simply returns the cached accuracy of the clk, unless 1307 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1308 * issued. 1309 * If clk is NULL then returns 0. 1310 */ 1311 long clk_get_accuracy(struct clk *clk) 1312 { 1313 if (!clk) 1314 return 0; 1315 1316 return clk_core_get_accuracy(clk->core); 1317 } 1318 EXPORT_SYMBOL_GPL(clk_get_accuracy); 1319 1320 static unsigned long clk_recalc(struct clk_core *core, 1321 unsigned long parent_rate) 1322 { 1323 unsigned long rate = parent_rate; 1324 1325 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { 1326 rate = core->ops->recalc_rate(core->hw, parent_rate); 1327 clk_pm_runtime_put(core); 1328 } 1329 return rate; 1330 } 1331 1332 /** 1333 * __clk_recalc_rates 1334 * @core: first clk in the subtree 1335 * @msg: notification type (see include/linux/clk.h) 1336 * 1337 * Walks the subtree of clks starting with clk and recalculates rates as it 1338 * goes. Note that if a clk does not implement the .recalc_rate callback then 1339 * it is assumed that the clock will take on the rate of its parent. 1340 * 1341 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1342 * if necessary. 
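 *
 * For reference, a provider's .recalc_rate callback is typically a pure
 * function of the parent rate; a hypothetical fixed divide-by-4 clock might
 * implement it as:
 *
 *	static unsigned long foo_recalc_rate(struct clk_hw *hw,
 *					     unsigned long parent_rate)
 *	{
 *		return parent_rate / 4;		// derived purely from the parent
 *	}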
1343 */ 1344 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) 1345 { 1346 unsigned long old_rate; 1347 unsigned long parent_rate = 0; 1348 struct clk_core *child; 1349 1350 lockdep_assert_held(&prepare_lock); 1351 1352 old_rate = core->rate; 1353 1354 if (core->parent) 1355 parent_rate = core->parent->rate; 1356 1357 core->rate = clk_recalc(core, parent_rate); 1358 1359 /* 1360 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1361 * & ABORT_RATE_CHANGE notifiers 1362 */ 1363 if (core->notifier_count && msg) 1364 __clk_notify(core, msg, old_rate, core->rate); 1365 1366 hlist_for_each_entry(child, &core->children, child_node) 1367 __clk_recalc_rates(child, msg); 1368 } 1369 1370 static unsigned long clk_core_get_rate(struct clk_core *core) 1371 { 1372 unsigned long rate; 1373 1374 clk_prepare_lock(); 1375 1376 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) 1377 __clk_recalc_rates(core, 0); 1378 1379 rate = clk_core_get_rate_nolock(core); 1380 clk_prepare_unlock(); 1381 1382 return rate; 1383 } 1384 1385 /** 1386 * clk_get_rate - return the rate of clk 1387 * @clk: the clk whose rate is being returned 1388 * 1389 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1390 * is set, which means a recalc_rate will be issued. 1391 * If clk is NULL then returns 0. 1392 */ 1393 unsigned long clk_get_rate(struct clk *clk) 1394 { 1395 if (!clk) 1396 return 0; 1397 1398 return clk_core_get_rate(clk->core); 1399 } 1400 EXPORT_SYMBOL_GPL(clk_get_rate); 1401 1402 static int clk_fetch_parent_index(struct clk_core *core, 1403 struct clk_core *parent) 1404 { 1405 int i; 1406 1407 if (!parent) 1408 return -EINVAL; 1409 1410 for (i = 0; i < core->num_parents; i++) 1411 if (clk_core_get_parent_by_index(core, i) == parent) 1412 return i; 1413 1414 return -EINVAL; 1415 } 1416 1417 /* 1418 * Update the orphan status of @core and all its children. 1419 */ 1420 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 1421 { 1422 struct clk_core *child; 1423 1424 core->orphan = is_orphan; 1425 1426 hlist_for_each_entry(child, &core->children, child_node) 1427 clk_core_update_orphan_status(child, is_orphan); 1428 } 1429 1430 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 1431 { 1432 bool was_orphan = core->orphan; 1433 1434 hlist_del(&core->child_node); 1435 1436 if (new_parent) { 1437 bool becomes_orphan = new_parent->orphan; 1438 1439 /* avoid duplicate POST_RATE_CHANGE notifications */ 1440 if (new_parent->new_child == core) 1441 new_parent->new_child = NULL; 1442 1443 hlist_add_head(&core->child_node, &new_parent->children); 1444 1445 if (was_orphan != becomes_orphan) 1446 clk_core_update_orphan_status(core, becomes_orphan); 1447 } else { 1448 hlist_add_head(&core->child_node, &clk_orphan_list); 1449 if (!was_orphan) 1450 clk_core_update_orphan_status(core, true); 1451 } 1452 1453 core->parent = new_parent; 1454 } 1455 1456 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 1457 struct clk_core *parent) 1458 { 1459 unsigned long flags; 1460 struct clk_core *old_parent = core->parent; 1461 1462 /* 1463 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock 1464 * 1465 * 2. Migrate prepare state between parents and prevent race with 1466 * clk_enable(). 
1467 * 1468 * If the clock is not prepared, then a race with 1469 * clk_enable/disable() is impossible since we already have the 1470 * prepare lock (future calls to clk_enable() need to be preceded by 1471 * a clk_prepare()). 1472 * 1473 * If the clock is prepared, migrate the prepared state to the new 1474 * parent and also protect against a race with clk_enable() by 1475 * forcing the clock and the new parent on. This ensures that all 1476 * future calls to clk_enable() are practically NOPs with respect to 1477 * hardware and software states. 1478 * 1479 * See also: Comment for clk_set_parent() below. 1480 */ 1481 1482 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 1483 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1484 clk_core_prepare_enable(old_parent); 1485 clk_core_prepare_enable(parent); 1486 } 1487 1488 /* migrate prepare count if > 0 */ 1489 if (core->prepare_count) { 1490 clk_core_prepare_enable(parent); 1491 clk_core_enable_lock(core); 1492 } 1493 1494 /* update the clk tree topology */ 1495 flags = clk_enable_lock(); 1496 clk_reparent(core, parent); 1497 clk_enable_unlock(flags); 1498 1499 return old_parent; 1500 } 1501 1502 static void __clk_set_parent_after(struct clk_core *core, 1503 struct clk_core *parent, 1504 struct clk_core *old_parent) 1505 { 1506 /* 1507 * Finish the migration of prepare state and undo the changes done 1508 * for preventing a race with clk_enable(). 1509 */ 1510 if (core->prepare_count) { 1511 clk_core_disable_lock(core); 1512 clk_core_disable_unprepare(old_parent); 1513 } 1514 1515 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 1516 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1517 clk_core_disable_unprepare(parent); 1518 clk_core_disable_unprepare(old_parent); 1519 } 1520 } 1521 1522 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 1523 u8 p_index) 1524 { 1525 unsigned long flags; 1526 int ret = 0; 1527 struct clk_core *old_parent; 1528 1529 old_parent = __clk_set_parent_before(core, parent); 1530 1531 trace_clk_set_parent(core, parent); 1532 1533 /* change clock input source */ 1534 if (parent && core->ops->set_parent) 1535 ret = core->ops->set_parent(core->hw, p_index); 1536 1537 trace_clk_set_parent_complete(core, parent); 1538 1539 if (ret) { 1540 flags = clk_enable_lock(); 1541 clk_reparent(core, old_parent); 1542 clk_enable_unlock(flags); 1543 __clk_set_parent_after(core, old_parent, parent); 1544 1545 return ret; 1546 } 1547 1548 __clk_set_parent_after(core, parent, old_parent); 1549 1550 return 0; 1551 } 1552 1553 /** 1554 * __clk_speculate_rates 1555 * @core: first clk in the subtree 1556 * @parent_rate: the "future" rate of clk's parent 1557 * 1558 * Walks the subtree of clks starting with clk, speculating rates as it 1559 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 1560 * 1561 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 1562 * pre-rate change notifications and returns early if no clks in the 1563 * subtree have subscribed to the notifications. Note that if a clk does not 1564 * implement the .recalc_rate callback then it is assumed that the clock will 1565 * take on the rate of its parent. 
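 *
 * A consumer that subscribed with clk_notifier_register() sees these
 * speculated rates as PRE_RATE_CHANGE events; a hypothetical callback that
 * vetoes rates above some limit could look like:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
 *			return NOTIFY_BAD;	// aborts the rate change
 *		return NOTIFY_OK;
 *	}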
1566 */ 1567 static int __clk_speculate_rates(struct clk_core *core, 1568 unsigned long parent_rate) 1569 { 1570 struct clk_core *child; 1571 unsigned long new_rate; 1572 int ret = NOTIFY_DONE; 1573 1574 lockdep_assert_held(&prepare_lock); 1575 1576 new_rate = clk_recalc(core, parent_rate); 1577 1578 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 1579 if (core->notifier_count) 1580 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 1581 1582 if (ret & NOTIFY_STOP_MASK) { 1583 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 1584 __func__, core->name, ret); 1585 goto out; 1586 } 1587 1588 hlist_for_each_entry(child, &core->children, child_node) { 1589 ret = __clk_speculate_rates(child, new_rate); 1590 if (ret & NOTIFY_STOP_MASK) 1591 break; 1592 } 1593 1594 out: 1595 return ret; 1596 } 1597 1598 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 1599 struct clk_core *new_parent, u8 p_index) 1600 { 1601 struct clk_core *child; 1602 1603 core->new_rate = new_rate; 1604 core->new_parent = new_parent; 1605 core->new_parent_index = p_index; 1606 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 1607 core->new_child = NULL; 1608 if (new_parent && new_parent != core->parent) 1609 new_parent->new_child = core; 1610 1611 hlist_for_each_entry(child, &core->children, child_node) { 1612 child->new_rate = clk_recalc(child, new_rate); 1613 clk_calc_subtree(child, child->new_rate, NULL, 0); 1614 } 1615 } 1616 1617 /* 1618 * calculate the new rates returning the topmost clock that has to be 1619 * changed. 1620 */ 1621 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 1622 unsigned long rate) 1623 { 1624 struct clk_core *top = core; 1625 struct clk_core *old_parent, *parent; 1626 unsigned long best_parent_rate = 0; 1627 unsigned long new_rate; 1628 unsigned long min_rate; 1629 unsigned long max_rate; 1630 int p_index = 0; 1631 long ret; 1632 1633 /* sanity */ 1634 if (IS_ERR_OR_NULL(core)) 1635 return NULL; 1636 1637 /* save parent rate, if it exists */ 1638 parent = old_parent = core->parent; 1639 if (parent) 1640 best_parent_rate = parent->rate; 1641 1642 clk_core_get_boundaries(core, &min_rate, &max_rate); 1643 1644 /* find the closest rate and parent clk/rate */ 1645 if (clk_core_can_round(core)) { 1646 struct clk_rate_request req; 1647 1648 req.rate = rate; 1649 req.min_rate = min_rate; 1650 req.max_rate = max_rate; 1651 1652 clk_core_init_rate_req(core, &req); 1653 1654 ret = clk_core_determine_round_nolock(core, &req); 1655 if (ret < 0) 1656 return NULL; 1657 1658 best_parent_rate = req.best_parent_rate; 1659 new_rate = req.rate; 1660 parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; 1661 1662 if (new_rate < min_rate || new_rate > max_rate) 1663 return NULL; 1664 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 1665 /* pass-through clock without adjustable parent */ 1666 core->new_rate = core->rate; 1667 return NULL; 1668 } else { 1669 /* pass-through clock with adjustable parent */ 1670 top = clk_calc_new_rates(parent, rate); 1671 new_rate = parent->new_rate; 1672 goto out; 1673 } 1674 1675 /* some clocks must be gated to change parent */ 1676 if (parent != old_parent && 1677 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 1678 pr_debug("%s: %s not gated but wants to reparent\n", 1679 __func__, core->name); 1680 return NULL; 1681 } 1682 1683 /* try finding the new parent index */ 1684 if (parent && core->num_parents > 1) { 1685 p_index = clk_fetch_parent_index(core, parent); 1686 if (p_index < 0) { 1687 pr_debug("%s: clk %s can not be parent of clk %s\n", 1688 __func__, parent->name, core->name); 1689 return NULL; 1690 } 1691 } 1692 1693 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 1694 best_parent_rate != parent->rate) 1695 top = clk_calc_new_rates(parent, best_parent_rate); 1696 1697 out: 1698 clk_calc_subtree(core, new_rate, parent, p_index); 1699 1700 return top; 1701 } 1702 1703 /* 1704 * Notify about rate changes in a subtree. Always walk down the whole tree 1705 * so that in case of an error we can walk down the whole tree again and 1706 * abort the change. 1707 */ 1708 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 1709 unsigned long event) 1710 { 1711 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 1712 int ret = NOTIFY_DONE; 1713 1714 if (core->rate == core->new_rate) 1715 return NULL; 1716 1717 if (core->notifier_count) { 1718 ret = __clk_notify(core, event, core->rate, core->new_rate); 1719 if (ret & NOTIFY_STOP_MASK) 1720 fail_clk = core; 1721 } 1722 1723 hlist_for_each_entry(child, &core->children, child_node) { 1724 /* Skip children who will be reparented to another clock */ 1725 if (child->new_parent && child->new_parent != core) 1726 continue; 1727 tmp_clk = clk_propagate_rate_change(child, event); 1728 if (tmp_clk) 1729 fail_clk = tmp_clk; 1730 } 1731 1732 /* handle the new child who might not be in core->children yet */ 1733 if (core->new_child) { 1734 tmp_clk = clk_propagate_rate_change(core->new_child, event); 1735 if (tmp_clk) 1736 fail_clk = tmp_clk; 1737 } 1738 1739 return fail_clk; 1740 } 1741 1742 /* 1743 * walk down a subtree and set the new rates notifying the rate 1744 * change on the way 1745 */ 1746 static void clk_change_rate(struct clk_core *core) 1747 { 1748 struct clk_core *child; 1749 struct hlist_node *tmp; 1750 unsigned long old_rate; 1751 unsigned long best_parent_rate = 0; 1752 bool skip_set_rate = false; 1753 struct clk_core *old_parent; 1754 struct clk_core *parent = NULL; 1755 1756 old_rate = core->rate; 1757 1758 if (core->new_parent) { 1759 parent = core->new_parent; 1760 best_parent_rate = core->new_parent->rate; 1761 } else if (core->parent) { 1762 parent = core->parent; 1763 best_parent_rate = core->parent->rate; 1764 } 1765 1766 if (clk_pm_runtime_get(core)) 1767 return; 1768 1769 if (core->flags & CLK_SET_RATE_UNGATE) { 1770 unsigned long flags; 1771 1772 clk_core_prepare(core); 1773 flags = clk_enable_lock(); 1774 clk_core_enable(core); 1775 clk_enable_unlock(flags); 1776 } 1777 1778 if (core->new_parent && core->new_parent != core->parent) { 1779 old_parent = __clk_set_parent_before(core, core->new_parent); 1780 
trace_clk_set_parent(core, core->new_parent); 1781 1782 if (core->ops->set_rate_and_parent) { 1783 skip_set_rate = true; 1784 core->ops->set_rate_and_parent(core->hw, core->new_rate, 1785 best_parent_rate, 1786 core->new_parent_index); 1787 } else if (core->ops->set_parent) { 1788 core->ops->set_parent(core->hw, core->new_parent_index); 1789 } 1790 1791 trace_clk_set_parent_complete(core, core->new_parent); 1792 __clk_set_parent_after(core, core->new_parent, old_parent); 1793 } 1794 1795 if (core->flags & CLK_OPS_PARENT_ENABLE) 1796 clk_core_prepare_enable(parent); 1797 1798 trace_clk_set_rate(core, core->new_rate); 1799 1800 if (!skip_set_rate && core->ops->set_rate) 1801 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 1802 1803 trace_clk_set_rate_complete(core, core->new_rate); 1804 1805 core->rate = clk_recalc(core, best_parent_rate); 1806 1807 if (core->flags & CLK_SET_RATE_UNGATE) { 1808 unsigned long flags; 1809 1810 flags = clk_enable_lock(); 1811 clk_core_disable(core); 1812 clk_enable_unlock(flags); 1813 clk_core_unprepare(core); 1814 } 1815 1816 if (core->flags & CLK_OPS_PARENT_ENABLE) 1817 clk_core_disable_unprepare(parent); 1818 1819 if (core->notifier_count && old_rate != core->rate) 1820 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 1821 1822 if (core->flags & CLK_RECALC_NEW_RATES) 1823 (void)clk_calc_new_rates(core, core->new_rate); 1824 1825 /* 1826 * Use safe iteration, as change_rate can actually swap parents 1827 * for certain clock types. 1828 */ 1829 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 1830 /* Skip children who will be reparented to another clock */ 1831 if (child->new_parent && child->new_parent != core) 1832 continue; 1833 clk_change_rate(child); 1834 } 1835 1836 /* handle the new child who might not be in core->children yet */ 1837 if (core->new_child) 1838 clk_change_rate(core->new_child); 1839 1840 clk_pm_runtime_put(core); 1841 } 1842 1843 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 1844 unsigned long req_rate) 1845 { 1846 int ret, cnt; 1847 struct clk_rate_request req; 1848 1849 lockdep_assert_held(&prepare_lock); 1850 1851 if (!core) 1852 return 0; 1853 1854 /* simulate what the rate would be if it could be freely set */ 1855 cnt = clk_core_rate_nuke_protect(core); 1856 if (cnt < 0) 1857 return cnt; 1858 1859 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); 1860 req.rate = req_rate; 1861 1862 ret = clk_core_round_rate_nolock(core, &req); 1863 1864 /* restore the protection */ 1865 clk_core_rate_restore_protect(core, cnt); 1866 1867 return ret ? 
		0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section
 *
 * This can be used initially to ensure that at least 1 consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is not done here, on purpose.
	 * This function is meant to be used instead of clk_rate_protect,
	 * so it protects the clock provider before the consumer code path
	 * runs.
	 */

	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
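/*
 * Illustrative consumer-side sketch (not part of this file) of the
 * clk_set_rate_exclusive()/clk_rate_exclusive_put() pattern described above.
 * The clock handle and the rate are made up for the example.
 */
#if 0
static int example_pin_pixel_clock(struct clk *pixclk)
{
	int ret;

	/* set the rate and, if that worked, keep other consumers away */
	ret = clk_set_rate_exclusive(pixclk, 148500000);
	if (ret)
		return ret;

	/* ... use the clock at the now-guaranteed rate ... */

	clk_rate_exclusive_put(pixclk);	/* balance the implicit _get() */
	return 0;
}
#endif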
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in a bit of trouble here, the current rate is outside
		 * the requested range. We are going to try to request an
		 * appropriate range boundary but there is a catch. It may fail
		 * for the usual reason (clock broken, clock protected, etc)
		 * but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
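 *
 * Illustrative consumer sketch (not part of this file; the handle and rates
 * are made up) combining the per-user min/max constraints described above:
 *
 *	clk_set_min_rate(cpu_clk, 400000000);	// never below 400 MHz
 *	clk_set_max_rate(cpu_clk, 1200000000);	// never above 1.2 GHz
 *	// roughly equivalent to clk_set_rate_range(cpu_clk, 400000000, 1200000000)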
2107 */ 2108 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2109 { 2110 if (!clk) 2111 return 0; 2112 2113 return clk_set_rate_range(clk, clk->min_rate, rate); 2114 } 2115 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2116 2117 /** 2118 * clk_get_parent - return the parent of a clk 2119 * @clk: the clk whose parent gets returned 2120 * 2121 * Simply returns clk->parent. Returns NULL if clk is NULL. 2122 */ 2123 struct clk *clk_get_parent(struct clk *clk) 2124 { 2125 struct clk *parent; 2126 2127 if (!clk) 2128 return NULL; 2129 2130 clk_prepare_lock(); 2131 /* TODO: Create a per-user clk and change callers to call clk_put */ 2132 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2133 clk_prepare_unlock(); 2134 2135 return parent; 2136 } 2137 EXPORT_SYMBOL_GPL(clk_get_parent); 2138 2139 static struct clk_core *__clk_init_parent(struct clk_core *core) 2140 { 2141 u8 index = 0; 2142 2143 if (core->num_parents > 1 && core->ops->get_parent) 2144 index = core->ops->get_parent(core->hw); 2145 2146 return clk_core_get_parent_by_index(core, index); 2147 } 2148 2149 static void clk_core_reparent(struct clk_core *core, 2150 struct clk_core *new_parent) 2151 { 2152 clk_reparent(core, new_parent); 2153 __clk_recalc_accuracies(core); 2154 __clk_recalc_rates(core, POST_RATE_CHANGE); 2155 } 2156 2157 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2158 { 2159 if (!hw) 2160 return; 2161 2162 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 2163 } 2164 2165 /** 2166 * clk_has_parent - check if a clock is a possible parent for another 2167 * @clk: clock source 2168 * @parent: parent clock source 2169 * 2170 * This function can be used in drivers that need to check that a clock can be 2171 * the parent of another without actually changing the parent. 2172 * 2173 * Returns true if @parent is a possible parent for @clk, false otherwise. 2174 */ 2175 bool clk_has_parent(struct clk *clk, struct clk *parent) 2176 { 2177 struct clk_core *core, *parent_core; 2178 2179 /* NULL clocks should be nops, so return success if either is NULL. */ 2180 if (!clk || !parent) 2181 return true; 2182 2183 core = clk->core; 2184 parent_core = parent->core; 2185 2186 /* Optimize for the case where the parent is already the parent. 
*/ 2187 if (core->parent == parent_core) 2188 return true; 2189 2190 return match_string(core->parent_names, core->num_parents, 2191 parent_core->name) >= 0; 2192 } 2193 EXPORT_SYMBOL_GPL(clk_has_parent); 2194 2195 static int clk_core_set_parent_nolock(struct clk_core *core, 2196 struct clk_core *parent) 2197 { 2198 int ret = 0; 2199 int p_index = 0; 2200 unsigned long p_rate = 0; 2201 2202 lockdep_assert_held(&prepare_lock); 2203 2204 if (!core) 2205 return 0; 2206 2207 if (core->parent == parent) 2208 return 0; 2209 2210 /* verify ops for for multi-parent clks */ 2211 if (core->num_parents > 1 && !core->ops->set_parent) 2212 return -EPERM; 2213 2214 /* check that we are allowed to re-parent if the clock is in use */ 2215 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2216 return -EBUSY; 2217 2218 if (clk_core_rate_is_protected(core)) 2219 return -EBUSY; 2220 2221 /* try finding the new parent index */ 2222 if (parent) { 2223 p_index = clk_fetch_parent_index(core, parent); 2224 if (p_index < 0) { 2225 pr_debug("%s: clk %s can not be parent of clk %s\n", 2226 __func__, parent->name, core->name); 2227 return p_index; 2228 } 2229 p_rate = parent->rate; 2230 } 2231 2232 ret = clk_pm_runtime_get(core); 2233 if (ret) 2234 return ret; 2235 2236 /* propagate PRE_RATE_CHANGE notifications */ 2237 ret = __clk_speculate_rates(core, p_rate); 2238 2239 /* abort if a driver objects */ 2240 if (ret & NOTIFY_STOP_MASK) 2241 goto runtime_put; 2242 2243 /* do the re-parent */ 2244 ret = __clk_set_parent(core, parent, p_index); 2245 2246 /* propagate rate an accuracy recalculation accordingly */ 2247 if (ret) { 2248 __clk_recalc_rates(core, ABORT_RATE_CHANGE); 2249 } else { 2250 __clk_recalc_rates(core, POST_RATE_CHANGE); 2251 __clk_recalc_accuracies(core); 2252 } 2253 2254 runtime_put: 2255 clk_pm_runtime_put(core); 2256 2257 return ret; 2258 } 2259 2260 /** 2261 * clk_set_parent - switch the parent of a mux clk 2262 * @clk: the mux clk whose input we are switching 2263 * @parent: the new input to clk 2264 * 2265 * Re-parent clk to use parent as its new input source. If clk is in 2266 * prepared state, the clk will get enabled for the duration of this call. If 2267 * that's not acceptable for a specific clk (Eg: the consumer can't handle 2268 * that, the reparenting is glitchy in hardware, etc), use the 2269 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2270 * 2271 * After successfully changing clk's parent clk_set_parent will update the 2272 * clk topology, sysfs topology and propagate rate recalculation via 2273 * __clk_recalc_rates. 2274 * 2275 * Returns 0 on success, -EERROR otherwise. 2276 */ 2277 int clk_set_parent(struct clk *clk, struct clk *parent) 2278 { 2279 int ret; 2280 2281 if (!clk) 2282 return 0; 2283 2284 clk_prepare_lock(); 2285 2286 if (clk->exclusive_count) 2287 clk_core_rate_unprotect(clk->core); 2288 2289 ret = clk_core_set_parent_nolock(clk->core, 2290 parent ? 
parent->core : NULL); 2291 2292 if (clk->exclusive_count) 2293 clk_core_rate_protect(clk->core); 2294 2295 clk_prepare_unlock(); 2296 2297 return ret; 2298 } 2299 EXPORT_SYMBOL_GPL(clk_set_parent); 2300 2301 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2302 { 2303 int ret = -EINVAL; 2304 2305 lockdep_assert_held(&prepare_lock); 2306 2307 if (!core) 2308 return 0; 2309 2310 if (clk_core_rate_is_protected(core)) 2311 return -EBUSY; 2312 2313 trace_clk_set_phase(core, degrees); 2314 2315 if (core->ops->set_phase) { 2316 ret = core->ops->set_phase(core->hw, degrees); 2317 if (!ret) 2318 core->phase = degrees; 2319 } 2320 2321 trace_clk_set_phase_complete(core, degrees); 2322 2323 return ret; 2324 } 2325 2326 /** 2327 * clk_set_phase - adjust the phase shift of a clock signal 2328 * @clk: clock signal source 2329 * @degrees: number of degrees the signal is shifted 2330 * 2331 * Shifts the phase of a clock signal by the specified 2332 * degrees. Returns 0 on success, -EERROR otherwise. 2333 * 2334 * This function makes no distinction about the input or reference 2335 * signal that we adjust the clock signal phase against. For example 2336 * phase locked-loop clock signal generators we may shift phase with 2337 * respect to feedback clock signal input, but for other cases the 2338 * clock phase may be shifted with respect to some other, unspecified 2339 * signal. 2340 * 2341 * Additionally the concept of phase shift does not propagate through 2342 * the clock tree hierarchy, which sets it apart from clock rates and 2343 * clock accuracy. A parent clock phase attribute does not have an 2344 * impact on the phase attribute of a child clock. 2345 */ 2346 int clk_set_phase(struct clk *clk, int degrees) 2347 { 2348 int ret; 2349 2350 if (!clk) 2351 return 0; 2352 2353 /* sanity check degrees */ 2354 degrees %= 360; 2355 if (degrees < 0) 2356 degrees += 360; 2357 2358 clk_prepare_lock(); 2359 2360 if (clk->exclusive_count) 2361 clk_core_rate_unprotect(clk->core); 2362 2363 ret = clk_core_set_phase_nolock(clk->core, degrees); 2364 2365 if (clk->exclusive_count) 2366 clk_core_rate_protect(clk->core); 2367 2368 clk_prepare_unlock(); 2369 2370 return ret; 2371 } 2372 EXPORT_SYMBOL_GPL(clk_set_phase); 2373 2374 static int clk_core_get_phase(struct clk_core *core) 2375 { 2376 int ret; 2377 2378 clk_prepare_lock(); 2379 /* Always try to update cached phase if possible */ 2380 if (core->ops->get_phase) 2381 core->phase = core->ops->get_phase(core->hw); 2382 ret = core->phase; 2383 clk_prepare_unlock(); 2384 2385 return ret; 2386 } 2387 2388 /** 2389 * clk_get_phase - return the phase shift of a clock signal 2390 * @clk: clock signal source 2391 * 2392 * Returns the phase shift of a clock node in degrees, otherwise returns 2393 * -EERROR. 2394 */ 2395 int clk_get_phase(struct clk *clk) 2396 { 2397 if (!clk) 2398 return 0; 2399 2400 return clk_core_get_phase(clk->core); 2401 } 2402 EXPORT_SYMBOL_GPL(clk_get_phase); 2403 2404 /** 2405 * clk_is_match - check if two clk's point to the same hardware clock 2406 * @p: clk compared against q 2407 * @q: clk compared against p 2408 * 2409 * Returns true if the two struct clk pointers both point to the same hardware 2410 * clock node. Put differently, returns true if struct clk *p and struct clk *q 2411 * share the same struct clk_core object. 2412 * 2413 * Returns false otherwise. Note that two NULL clks are treated as matching. 
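 *
 * A small usage sketch (both handles are hypothetical consumer clocks,
 * not defined in this file): a driver handed two clock references can
 * detect that they resolve to the same hardware clock and avoid
 * configuring it twice.
 *
 *   if (clk_is_match(bus_clk, core_clk))
 *           return 0;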
2414 */ 2415 bool clk_is_match(const struct clk *p, const struct clk *q) 2416 { 2417 /* trivial case: identical struct clk's or both NULL */ 2418 if (p == q) 2419 return true; 2420 2421 /* true if clk->core pointers match. Avoid dereferencing garbage */ 2422 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 2423 if (p->core == q->core) 2424 return true; 2425 2426 return false; 2427 } 2428 EXPORT_SYMBOL_GPL(clk_is_match); 2429 2430 /*** debugfs support ***/ 2431 2432 #ifdef CONFIG_DEBUG_FS 2433 #include <linux/debugfs.h> 2434 2435 static struct dentry *rootdir; 2436 static int inited = 0; 2437 static DEFINE_MUTEX(clk_debug_lock); 2438 static HLIST_HEAD(clk_debug_list); 2439 2440 static struct hlist_head *all_lists[] = { 2441 &clk_root_list, 2442 &clk_orphan_list, 2443 NULL, 2444 }; 2445 2446 static struct hlist_head *orphan_list[] = { 2447 &clk_orphan_list, 2448 NULL, 2449 }; 2450 2451 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 2452 int level) 2453 { 2454 if (!c) 2455 return; 2456 2457 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n", 2458 level * 3 + 1, "", 2459 30 - level * 3, c->name, 2460 c->enable_count, c->prepare_count, c->protect_count, 2461 clk_core_get_rate(c), clk_core_get_accuracy(c), 2462 clk_core_get_phase(c)); 2463 } 2464 2465 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 2466 int level) 2467 { 2468 struct clk_core *child; 2469 2470 if (!c) 2471 return; 2472 2473 clk_summary_show_one(s, c, level); 2474 2475 hlist_for_each_entry(child, &c->children, child_node) 2476 clk_summary_show_subtree(s, child, level + 1); 2477 } 2478 2479 static int clk_summary_show(struct seq_file *s, void *data) 2480 { 2481 struct clk_core *c; 2482 struct hlist_head **lists = (struct hlist_head **)s->private; 2483 2484 seq_puts(s, " enable prepare protect \n"); 2485 seq_puts(s, " clock count count count rate accuracy phase\n"); 2486 seq_puts(s, "----------------------------------------------------------------------------------------\n"); 2487 2488 clk_prepare_lock(); 2489 2490 for (; *lists; lists++) 2491 hlist_for_each_entry(c, *lists, child_node) 2492 clk_summary_show_subtree(s, c, 0); 2493 2494 clk_prepare_unlock(); 2495 2496 return 0; 2497 } 2498 DEFINE_SHOW_ATTRIBUTE(clk_summary); 2499 2500 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 2501 { 2502 if (!c) 2503 return; 2504 2505 /* This should be JSON format, i.e. 
elements separated with a comma */ 2506 seq_printf(s, "\"%s\": { ", c->name); 2507 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 2508 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 2509 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 2510 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2511 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2512 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); 2513 } 2514 2515 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 2516 { 2517 struct clk_core *child; 2518 2519 if (!c) 2520 return; 2521 2522 clk_dump_one(s, c, level); 2523 2524 hlist_for_each_entry(child, &c->children, child_node) { 2525 seq_putc(s, ','); 2526 clk_dump_subtree(s, child, level + 1); 2527 } 2528 2529 seq_putc(s, '}'); 2530 } 2531 2532 static int clk_dump_show(struct seq_file *s, void *data) 2533 { 2534 struct clk_core *c; 2535 bool first_node = true; 2536 struct hlist_head **lists = (struct hlist_head **)s->private; 2537 2538 seq_putc(s, '{'); 2539 clk_prepare_lock(); 2540 2541 for (; *lists; lists++) { 2542 hlist_for_each_entry(c, *lists, child_node) { 2543 if (!first_node) 2544 seq_putc(s, ','); 2545 first_node = false; 2546 clk_dump_subtree(s, c, 0); 2547 } 2548 } 2549 2550 clk_prepare_unlock(); 2551 2552 seq_puts(s, "}\n"); 2553 return 0; 2554 } 2555 DEFINE_SHOW_ATTRIBUTE(clk_dump); 2556 2557 static const struct { 2558 unsigned long flag; 2559 const char *name; 2560 } clk_flags[] = { 2561 #define ENTRY(f) { f, #f } 2562 ENTRY(CLK_SET_RATE_GATE), 2563 ENTRY(CLK_SET_PARENT_GATE), 2564 ENTRY(CLK_SET_RATE_PARENT), 2565 ENTRY(CLK_IGNORE_UNUSED), 2566 ENTRY(CLK_IS_BASIC), 2567 ENTRY(CLK_GET_RATE_NOCACHE), 2568 ENTRY(CLK_SET_RATE_NO_REPARENT), 2569 ENTRY(CLK_GET_ACCURACY_NOCACHE), 2570 ENTRY(CLK_RECALC_NEW_RATES), 2571 ENTRY(CLK_SET_RATE_UNGATE), 2572 ENTRY(CLK_IS_CRITICAL), 2573 ENTRY(CLK_OPS_PARENT_ENABLE), 2574 #undef ENTRY 2575 }; 2576 2577 static int clk_flags_show(struct seq_file *s, void *data) 2578 { 2579 struct clk_core *core = s->private; 2580 unsigned long flags = core->flags; 2581 unsigned int i; 2582 2583 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 2584 if (flags & clk_flags[i].flag) { 2585 seq_printf(s, "%s\n", clk_flags[i].name); 2586 flags &= ~clk_flags[i].flag; 2587 } 2588 } 2589 if (flags) { 2590 /* Unknown flags */ 2591 seq_printf(s, "0x%lx\n", flags); 2592 } 2593 2594 return 0; 2595 } 2596 DEFINE_SHOW_ATTRIBUTE(clk_flags); 2597 2598 static int possible_parents_show(struct seq_file *s, void *data) 2599 { 2600 struct clk_core *core = s->private; 2601 int i; 2602 2603 for (i = 0; i < core->num_parents - 1; i++) 2604 seq_printf(s, "%s ", core->parent_names[i]); 2605 2606 seq_printf(s, "%s\n", core->parent_names[i]); 2607 2608 return 0; 2609 } 2610 DEFINE_SHOW_ATTRIBUTE(possible_parents); 2611 2612 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 2613 { 2614 struct dentry *root; 2615 2616 if (!core || !pdentry) 2617 return; 2618 2619 root = debugfs_create_dir(core->name, pdentry); 2620 core->dentry = root; 2621 2622 debugfs_create_ulong("clk_rate", 0444, root, &core->rate); 2623 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 2624 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 2625 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 2626 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 2627 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 2628 
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 2629 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 2630 2631 if (core->num_parents > 1) 2632 debugfs_create_file("clk_possible_parents", 0444, root, core, 2633 &possible_parents_fops); 2634 2635 if (core->ops->debug_init) 2636 core->ops->debug_init(core->hw, core->dentry); 2637 } 2638 2639 /** 2640 * clk_debug_register - add a clk node to the debugfs clk directory 2641 * @core: the clk being added to the debugfs clk directory 2642 * 2643 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 2644 * initialized. Otherwise it bails out early since the debugfs clk directory 2645 * will be created lazily by clk_debug_init as part of a late_initcall. 2646 */ 2647 static void clk_debug_register(struct clk_core *core) 2648 { 2649 mutex_lock(&clk_debug_lock); 2650 hlist_add_head(&core->debug_node, &clk_debug_list); 2651 if (inited) 2652 clk_debug_create_one(core, rootdir); 2653 mutex_unlock(&clk_debug_lock); 2654 } 2655 2656 /** 2657 * clk_debug_unregister - remove a clk node from the debugfs clk directory 2658 * @core: the clk being removed from the debugfs clk directory 2659 * 2660 * Dynamically removes a clk and all its child nodes from the 2661 * debugfs clk directory if clk->dentry points to debugfs created by 2662 * clk_debug_register in __clk_core_init. 2663 */ 2664 static void clk_debug_unregister(struct clk_core *core) 2665 { 2666 mutex_lock(&clk_debug_lock); 2667 hlist_del_init(&core->debug_node); 2668 debugfs_remove_recursive(core->dentry); 2669 core->dentry = NULL; 2670 mutex_unlock(&clk_debug_lock); 2671 } 2672 2673 /** 2674 * clk_debug_init - lazily populate the debugfs clk directory 2675 * 2676 * clks are often initialized very early during boot before memory can be 2677 * dynamically allocated and well before debugfs is setup. This function 2678 * populates the debugfs clk directory once at boot-time when we know that 2679 * debugfs is setup. It should only be called once at boot-time, all other clks 2680 * added dynamically will be done so with clk_debug_register. 2681 */ 2682 static int __init clk_debug_init(void) 2683 { 2684 struct clk_core *core; 2685 2686 rootdir = debugfs_create_dir("clk", NULL); 2687 2688 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 2689 &clk_summary_fops); 2690 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 2691 &clk_dump_fops); 2692 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 2693 &clk_summary_fops); 2694 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 2695 &clk_dump_fops); 2696 2697 mutex_lock(&clk_debug_lock); 2698 hlist_for_each_entry(core, &clk_debug_list, debug_node) 2699 clk_debug_create_one(core, rootdir); 2700 2701 inited = 1; 2702 mutex_unlock(&clk_debug_lock); 2703 2704 return 0; 2705 } 2706 late_initcall(clk_debug_init); 2707 #else 2708 static inline void clk_debug_register(struct clk_core *core) { } 2709 static inline void clk_debug_reparent(struct clk_core *core, 2710 struct clk_core *new_parent) 2711 { 2712 } 2713 static inline void clk_debug_unregister(struct clk_core *core) 2714 { 2715 } 2716 #endif 2717 2718 /** 2719 * __clk_core_init - initialize the data structures in a struct clk_core 2720 * @core: clk_core being initialized 2721 * 2722 * Initializes the lists in struct clk_core, queries the hardware for the 2723 * parent and rate and sets them both. 
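 *
 * For orientation, a provider-side sketch of the data that typically
 * reaches this function (the "foo" names are hypothetical and not part of
 * this file). It passes the clk_ops sanity checks below because .set_rate
 * is paired with .round_rate and .recalc_rate:
 *
 *   static const char * const foo_parents[] = { "foo_parent" };
 *   static const struct clk_ops foo_ops = {
 *           .recalc_rate = foo_recalc_rate,
 *           .round_rate  = foo_round_rate,
 *           .set_rate    = foo_set_rate,
 *   };
 *   static const struct clk_init_data foo_init = {
 *           .name = "foo",
 *           .ops = &foo_ops,
 *           .parent_names = foo_parents,
 *           .num_parents = ARRAY_SIZE(foo_parents),
 *   };
 *   static struct clk_hw foo_hw = { .init = &foo_init };
 *
 *   clk = clk_register(dev, &foo_hw);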
2724 */ 2725 static int __clk_core_init(struct clk_core *core) 2726 { 2727 int i, ret; 2728 struct clk_core *orphan; 2729 struct hlist_node *tmp2; 2730 unsigned long rate; 2731 2732 if (!core) 2733 return -EINVAL; 2734 2735 clk_prepare_lock(); 2736 2737 ret = clk_pm_runtime_get(core); 2738 if (ret) 2739 goto unlock; 2740 2741 /* check to see if a clock with this name is already registered */ 2742 if (clk_core_lookup(core->name)) { 2743 pr_debug("%s: clk %s already initialized\n", 2744 __func__, core->name); 2745 ret = -EEXIST; 2746 goto out; 2747 } 2748 2749 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 2750 if (core->ops->set_rate && 2751 !((core->ops->round_rate || core->ops->determine_rate) && 2752 core->ops->recalc_rate)) { 2753 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 2754 __func__, core->name); 2755 ret = -EINVAL; 2756 goto out; 2757 } 2758 2759 if (core->ops->set_parent && !core->ops->get_parent) { 2760 pr_err("%s: %s must implement .get_parent & .set_parent\n", 2761 __func__, core->name); 2762 ret = -EINVAL; 2763 goto out; 2764 } 2765 2766 if (core->num_parents > 1 && !core->ops->get_parent) { 2767 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 2768 __func__, core->name); 2769 ret = -EINVAL; 2770 goto out; 2771 } 2772 2773 if (core->ops->set_rate_and_parent && 2774 !(core->ops->set_parent && core->ops->set_rate)) { 2775 pr_err("%s: %s must implement .set_parent & .set_rate\n", 2776 __func__, core->name); 2777 ret = -EINVAL; 2778 goto out; 2779 } 2780 2781 /* throw a WARN if any entries in parent_names are NULL */ 2782 for (i = 0; i < core->num_parents; i++) 2783 WARN(!core->parent_names[i], 2784 "%s: invalid NULL in %s's .parent_names\n", 2785 __func__, core->name); 2786 2787 core->parent = __clk_init_parent(core); 2788 2789 /* 2790 * Populate core->parent if parent has already been clk_core_init'd. If 2791 * parent has not yet been clk_core_init'd then place clk in the orphan 2792 * list. If clk doesn't have any parents then place it in the root 2793 * clk list. 2794 * 2795 * Every time a new clk is clk_init'd then we walk the list of orphan 2796 * clocks and re-parent any that are children of the clock currently 2797 * being clk_init'd. 2798 */ 2799 if (core->parent) { 2800 hlist_add_head(&core->child_node, 2801 &core->parent->children); 2802 core->orphan = core->parent->orphan; 2803 } else if (!core->num_parents) { 2804 hlist_add_head(&core->child_node, &clk_root_list); 2805 core->orphan = false; 2806 } else { 2807 hlist_add_head(&core->child_node, &clk_orphan_list); 2808 core->orphan = true; 2809 } 2810 2811 /* 2812 * optional platform-specific magic 2813 * 2814 * The .init callback is not used by any of the basic clock types, but 2815 * exists for weird hardware that must perform initialization magic. 2816 * Please consider other ways of solving initialization problems before 2817 * using this callback, as its use is discouraged. 2818 */ 2819 if (core->ops->init) 2820 core->ops->init(core->hw); 2821 2822 /* 2823 * Set clk's accuracy. The preferred method is to use 2824 * .recalc_accuracy. For simple clocks and lazy developers the default 2825 * fallback is to use the parent's accuracy. If a clock doesn't have a 2826 * parent (or is orphaned) then accuracy is set to zero (perfect 2827 * clock). 
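 *
 * A made-up illustration of that fallback chain: a PLL whose ops provide
 * .recalc_accuracy might report 100 ppb; a divider below it without the
 * callback inherits the parent's 100 ppb; a root oscillator with neither
 * callback nor parent ends up with 0.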
2828 */ 2829 if (core->ops->recalc_accuracy) 2830 core->accuracy = core->ops->recalc_accuracy(core->hw, 2831 __clk_get_accuracy(core->parent)); 2832 else if (core->parent) 2833 core->accuracy = core->parent->accuracy; 2834 else 2835 core->accuracy = 0; 2836 2837 /* 2838 * Set clk's phase. 2839 * Since a phase is by definition relative to its parent, just 2840 * query the current clock phase, or just assume it's in phase. 2841 */ 2842 if (core->ops->get_phase) 2843 core->phase = core->ops->get_phase(core->hw); 2844 else 2845 core->phase = 0; 2846 2847 /* 2848 * Set clk's rate. The preferred method is to use .recalc_rate. For 2849 * simple clocks and lazy developers the default fallback is to use the 2850 * parent's rate. If a clock doesn't have a parent (or is orphaned) 2851 * then rate is set to zero. 2852 */ 2853 if (core->ops->recalc_rate) 2854 rate = core->ops->recalc_rate(core->hw, 2855 clk_core_get_rate_nolock(core->parent)); 2856 else if (core->parent) 2857 rate = core->parent->rate; 2858 else 2859 rate = 0; 2860 core->rate = core->req_rate = rate; 2861 2862 /* 2863 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 2864 * don't get accidentally disabled when walking the orphan tree and 2865 * reparenting clocks. 2866 */ 2867 if (core->flags & CLK_IS_CRITICAL) { 2868 unsigned long flags; 2869 2870 clk_core_prepare(core); 2871 2872 flags = clk_enable_lock(); 2873 clk_core_enable(core); 2874 clk_enable_unlock(flags); 2875 } 2876 2877 /* 2878 * Walk the list of orphan clocks and reparent any that have newly 2879 * found a parent. 2880 */ 2881 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 2882 struct clk_core *parent = __clk_init_parent(orphan); 2883 2884 /* 2885 * We need to use __clk_set_parent_before() and _after() to 2886 * properly migrate any prepare/enable count of the orphan 2887 * clock. This is important for CLK_IS_CRITICAL clocks, which 2888 * are enabled during init but might not have a parent yet.
2889 */ 2890 if (parent) { 2891 /* update the clk tree topology */ 2892 __clk_set_parent_before(orphan, parent); 2893 __clk_set_parent_after(orphan, parent, NULL); 2894 __clk_recalc_accuracies(orphan); 2895 __clk_recalc_rates(orphan, 0); 2896 } 2897 } 2898 2899 kref_init(&core->ref); 2900 out: 2901 clk_pm_runtime_put(core); 2902 unlock: 2903 clk_prepare_unlock(); 2904 2905 if (!ret) 2906 clk_debug_register(core); 2907 2908 return ret; 2909 } 2910 2911 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, 2912 const char *con_id) 2913 { 2914 struct clk *clk; 2915 2916 /* This is to allow this function to be chained to others */ 2917 if (IS_ERR_OR_NULL(hw)) 2918 return ERR_CAST(hw); 2919 2920 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2921 if (!clk) 2922 return ERR_PTR(-ENOMEM); 2923 2924 clk->core = hw->core; 2925 clk->dev_id = dev_id; 2926 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 2927 clk->max_rate = ULONG_MAX; 2928 2929 clk_prepare_lock(); 2930 hlist_add_head(&clk->clks_node, &hw->core->clks); 2931 clk_prepare_unlock(); 2932 2933 return clk; 2934 } 2935 2936 void __clk_free_clk(struct clk *clk) 2937 { 2938 clk_prepare_lock(); 2939 hlist_del(&clk->clks_node); 2940 clk_prepare_unlock(); 2941 2942 kfree_const(clk->con_id); 2943 kfree(clk); 2944 } 2945 2946 /** 2947 * clk_register - allocate a new clock, register it and return an opaque cookie 2948 * @dev: device that is registering this clock 2949 * @hw: link to hardware-specific clock data 2950 * 2951 * clk_register is the primary interface for populating the clock tree with new 2952 * clock nodes. It returns a pointer to the newly allocated struct clk which 2953 * cannot be dereferenced by driver code but may be used in conjunction with the 2954 * rest of the clock API. In the event of an error clk_register will return an 2955 * error code; drivers must test for an error code after calling clk_register. 2956 */ 2957 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2958 { 2959 int i, ret; 2960 struct clk_core *core; 2961 2962 core = kzalloc(sizeof(*core), GFP_KERNEL); 2963 if (!core) { 2964 ret = -ENOMEM; 2965 goto fail_out; 2966 } 2967 2968 core->name = kstrdup_const(hw->init->name, GFP_KERNEL); 2969 if (!core->name) { 2970 ret = -ENOMEM; 2971 goto fail_name; 2972 } 2973 2974 if (WARN_ON(!hw->init->ops)) { 2975 ret = -EINVAL; 2976 goto fail_ops; 2977 } 2978 core->ops = hw->init->ops; 2979 2980 if (dev && pm_runtime_enabled(dev)) 2981 core->dev = dev; 2982 if (dev && dev->driver) 2983 core->owner = dev->driver->owner; 2984 core->hw = hw; 2985 core->flags = hw->init->flags; 2986 core->num_parents = hw->init->num_parents; 2987 core->min_rate = 0; 2988 core->max_rate = ULONG_MAX; 2989 hw->core = core; 2990 2991 /* allocate local copy in case parent_names is __initdata */ 2992 core->parent_names = kcalloc(core->num_parents, sizeof(char *), 2993 GFP_KERNEL); 2994 2995 if (!core->parent_names) { 2996 ret = -ENOMEM; 2997 goto fail_parent_names; 2998 } 2999 3000 3001 /* copy each string name in case parent_names is __initdata */ 3002 for (i = 0; i < core->num_parents; i++) { 3003 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], 3004 GFP_KERNEL); 3005 if (!core->parent_names[i]) { 3006 ret = -ENOMEM; 3007 goto fail_parent_names_copy; 3008 } 3009 } 3010 3011 /* avoid unnecessary string look-ups of clk_core's possible parents. 
*/ 3012 core->parents = kcalloc(core->num_parents, sizeof(*core->parents), 3013 GFP_KERNEL); 3014 if (!core->parents) { 3015 ret = -ENOMEM; 3016 goto fail_parents; 3017 }; 3018 3019 INIT_HLIST_HEAD(&core->clks); 3020 3021 hw->clk = __clk_create_clk(hw, NULL, NULL); 3022 if (IS_ERR(hw->clk)) { 3023 ret = PTR_ERR(hw->clk); 3024 goto fail_parents; 3025 } 3026 3027 ret = __clk_core_init(core); 3028 if (!ret) 3029 return hw->clk; 3030 3031 __clk_free_clk(hw->clk); 3032 hw->clk = NULL; 3033 3034 fail_parents: 3035 kfree(core->parents); 3036 fail_parent_names_copy: 3037 while (--i >= 0) 3038 kfree_const(core->parent_names[i]); 3039 kfree(core->parent_names); 3040 fail_parent_names: 3041 fail_ops: 3042 kfree_const(core->name); 3043 fail_name: 3044 kfree(core); 3045 fail_out: 3046 return ERR_PTR(ret); 3047 } 3048 EXPORT_SYMBOL_GPL(clk_register); 3049 3050 /** 3051 * clk_hw_register - register a clk_hw and return an error code 3052 * @dev: device that is registering this clock 3053 * @hw: link to hardware-specific clock data 3054 * 3055 * clk_hw_register is the primary interface for populating the clock tree with 3056 * new clock nodes. It returns an integer equal to zero indicating success or 3057 * less than zero indicating failure. Drivers must test for an error code after 3058 * calling clk_hw_register(). 3059 */ 3060 int clk_hw_register(struct device *dev, struct clk_hw *hw) 3061 { 3062 return PTR_ERR_OR_ZERO(clk_register(dev, hw)); 3063 } 3064 EXPORT_SYMBOL_GPL(clk_hw_register); 3065 3066 /* Free memory allocated for a clock. */ 3067 static void __clk_release(struct kref *ref) 3068 { 3069 struct clk_core *core = container_of(ref, struct clk_core, ref); 3070 int i = core->num_parents; 3071 3072 lockdep_assert_held(&prepare_lock); 3073 3074 kfree(core->parents); 3075 while (--i >= 0) 3076 kfree_const(core->parent_names[i]); 3077 3078 kfree(core->parent_names); 3079 kfree_const(core->name); 3080 kfree(core); 3081 } 3082 3083 /* 3084 * Empty clk_ops for unregistered clocks. These are used temporarily 3085 * after clk_unregister() was called on a clock and until last clock 3086 * consumer calls clk_put() and the struct clk object is freed. 3087 */ 3088 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 3089 { 3090 return -ENXIO; 3091 } 3092 3093 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 3094 { 3095 WARN_ON_ONCE(1); 3096 } 3097 3098 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 3099 unsigned long parent_rate) 3100 { 3101 return -ENXIO; 3102 } 3103 3104 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 3105 { 3106 return -ENXIO; 3107 } 3108 3109 static const struct clk_ops clk_nodrv_ops = { 3110 .enable = clk_nodrv_prepare_enable, 3111 .disable = clk_nodrv_disable_unprepare, 3112 .prepare = clk_nodrv_prepare_enable, 3113 .unprepare = clk_nodrv_disable_unprepare, 3114 .set_rate = clk_nodrv_set_rate, 3115 .set_parent = clk_nodrv_set_parent, 3116 }; 3117 3118 /** 3119 * clk_unregister - unregister a currently registered clock 3120 * @clk: clock to unregister 3121 */ 3122 void clk_unregister(struct clk *clk) 3123 { 3124 unsigned long flags; 3125 3126 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3127 return; 3128 3129 clk_debug_unregister(clk->core); 3130 3131 clk_prepare_lock(); 3132 3133 if (clk->core->ops == &clk_nodrv_ops) { 3134 pr_err("%s: unregistered clock: %s\n", __func__, 3135 clk->core->name); 3136 goto unlock; 3137 } 3138 /* 3139 * Assign empty clock ops for consumers that might still hold 3140 * a reference to this clock. 
3141 */ 3142 flags = clk_enable_lock(); 3143 clk->core->ops = &clk_nodrv_ops; 3144 clk_enable_unlock(flags); 3145 3146 if (!hlist_empty(&clk->core->children)) { 3147 struct clk_core *child; 3148 struct hlist_node *t; 3149 3150 /* Reparent all children to the orphan list. */ 3151 hlist_for_each_entry_safe(child, t, &clk->core->children, 3152 child_node) 3153 clk_core_set_parent_nolock(child, NULL); 3154 } 3155 3156 hlist_del_init(&clk->core->child_node); 3157 3158 if (clk->core->prepare_count) 3159 pr_warn("%s: unregistering prepared clock: %s\n", 3160 __func__, clk->core->name); 3161 3162 if (clk->core->protect_count) 3163 pr_warn("%s: unregistering protected clock: %s\n", 3164 __func__, clk->core->name); 3165 3166 kref_put(&clk->core->ref, __clk_release); 3167 unlock: 3168 clk_prepare_unlock(); 3169 } 3170 EXPORT_SYMBOL_GPL(clk_unregister); 3171 3172 /** 3173 * clk_hw_unregister - unregister a currently registered clk_hw 3174 * @hw: hardware-specific clock data to unregister 3175 */ 3176 void clk_hw_unregister(struct clk_hw *hw) 3177 { 3178 clk_unregister(hw->clk); 3179 } 3180 EXPORT_SYMBOL_GPL(clk_hw_unregister); 3181 3182 static void devm_clk_release(struct device *dev, void *res) 3183 { 3184 clk_unregister(*(struct clk **)res); 3185 } 3186 3187 static void devm_clk_hw_release(struct device *dev, void *res) 3188 { 3189 clk_hw_unregister(*(struct clk_hw **)res); 3190 } 3191 3192 /** 3193 * devm_clk_register - resource managed clk_register() 3194 * @dev: device that is registering this clock 3195 * @hw: link to hardware-specific clock data 3196 * 3197 * Managed clk_register(). Clocks returned from this function are 3198 * automatically clk_unregister()ed on driver detach. See clk_register() for 3199 * more information. 3200 */ 3201 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 3202 { 3203 struct clk *clk; 3204 struct clk **clkp; 3205 3206 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 3207 if (!clkp) 3208 return ERR_PTR(-ENOMEM); 3209 3210 clk = clk_register(dev, hw); 3211 if (!IS_ERR(clk)) { 3212 *clkp = clk; 3213 devres_add(dev, clkp); 3214 } else { 3215 devres_free(clkp); 3216 } 3217 3218 return clk; 3219 } 3220 EXPORT_SYMBOL_GPL(devm_clk_register); 3221 3222 /** 3223 * devm_clk_hw_register - resource managed clk_hw_register() 3224 * @dev: device that is registering this clock 3225 * @hw: link to hardware-specific clock data 3226 * 3227 * Managed clk_hw_register(). Clocks registered by this function are 3228 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 3229 * for more information. 
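 *
 * Typical use is from a probe path (sketch only; "pdev" and the "foo"
 * names are hypothetical and not part of this file):
 *
 *   foo->hw.init = &foo_init_data;
 *   ret = devm_clk_hw_register(&pdev->dev, &foo->hw);
 *   if (ret)
 *           return ret;
 *
 * The clk_hw is then unregistered automatically when the driver detaches.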
3230 */ 3231 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 3232 { 3233 struct clk_hw **hwp; 3234 int ret; 3235 3236 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); 3237 if (!hwp) 3238 return -ENOMEM; 3239 3240 ret = clk_hw_register(dev, hw); 3241 if (!ret) { 3242 *hwp = hw; 3243 devres_add(dev, hwp); 3244 } else { 3245 devres_free(hwp); 3246 } 3247 3248 return ret; 3249 } 3250 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 3251 3252 static int devm_clk_match(struct device *dev, void *res, void *data) 3253 { 3254 struct clk *c = res; 3255 if (WARN_ON(!c)) 3256 return 0; 3257 return c == data; 3258 } 3259 3260 static int devm_clk_hw_match(struct device *dev, void *res, void *data) 3261 { 3262 struct clk_hw *hw = res; 3263 3264 if (WARN_ON(!hw)) 3265 return 0; 3266 return hw == data; 3267 } 3268 3269 /** 3270 * devm_clk_unregister - resource managed clk_unregister() 3271 * @clk: clock to unregister 3272 * 3273 * Deallocate a clock allocated with devm_clk_register(). Normally 3274 * this function will not need to be called and the resource management 3275 * code will ensure that the resource is freed. 3276 */ 3277 void devm_clk_unregister(struct device *dev, struct clk *clk) 3278 { 3279 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 3280 } 3281 EXPORT_SYMBOL_GPL(devm_clk_unregister); 3282 3283 /** 3284 * devm_clk_hw_unregister - resource managed clk_hw_unregister() 3285 * @dev: device that is unregistering the hardware-specific clock data 3286 * @hw: link to hardware-specific clock data 3287 * 3288 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally 3289 * this function will not need to be called and the resource management 3290 * code will ensure that the resource is freed. 3291 */ 3292 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) 3293 { 3294 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, 3295 hw)); 3296 } 3297 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); 3298 3299 /* 3300 * clkdev helpers 3301 */ 3302 int __clk_get(struct clk *clk) 3303 { 3304 struct clk_core *core = !clk ? NULL : clk->core; 3305 3306 if (core) { 3307 if (!try_module_get(core->owner)) 3308 return 0; 3309 3310 kref_get(&core->ref); 3311 } 3312 return 1; 3313 } 3314 3315 void __clk_put(struct clk *clk) 3316 { 3317 struct module *owner; 3318 3319 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3320 return; 3321 3322 clk_prepare_lock(); 3323 3324 /* 3325 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 3326 * given user should be balanced with calls to clk_rate_exclusive_put() 3327 * and by that same consumer 3328 */ 3329 if (WARN_ON(clk->exclusive_count)) { 3330 /* We voiced our concern, let's sanitize the situation */ 3331 clk->core->protect_count -= (clk->exclusive_count - 1); 3332 clk_core_rate_unprotect(clk->core); 3333 clk->exclusive_count = 0; 3334 } 3335 3336 hlist_del(&clk->clks_node); 3337 if (clk->min_rate > clk->core->req_rate || 3338 clk->max_rate < clk->core->req_rate) 3339 clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 3340 3341 owner = clk->core->owner; 3342 kref_put(&clk->core->ref, __clk_release); 3343 3344 clk_prepare_unlock(); 3345 3346 module_put(owner); 3347 3348 kfree(clk); 3349 } 3350 3351 /*** clk rate change notifiers ***/ 3352 3353 /** 3354 * clk_notifier_register - add a clk rate change notifier 3355 * @clk: struct clk * to watch 3356 * @nb: struct notifier_block * with callback info 3357 * 3358 * Request notification when clk's rate changes. 
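 * A minimal notifier sketch (the callback, notifier block and "my_clk"
 * handle are hypothetical, not defined in this file); it vetoes rate
 * changes above 100 MHz and acknowledges everything else:
 *
 *   static int foo_clk_notify(struct notifier_block *nb,
 *                             unsigned long event, void *data)
 *   {
 *           struct clk_notifier_data *ndata = data;
 *
 *           if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *                   return NOTIFY_BAD;
 *           return NOTIFY_OK;
 *   }
 *
 *   static struct notifier_block foo_nb = {
 *           .notifier_call = foo_clk_notify,
 *   };
 *
 *   clk_notifier_register(my_clk, &foo_nb);
 *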
This uses an SRCU 3359 * notifier because we want it to block and notifier unregistrations are 3360 * uncommon. The callbacks associated with the notifier must not 3361 * re-enter into the clk framework by calling any top-level clk APIs; 3362 * this will cause a nested prepare_lock mutex. 3363 * 3364 * In all notification cases (pre, post and abort rate change) the original 3365 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 3366 * and the new frequency is passed via struct clk_notifier_data.new_rate. 3367 * 3368 * clk_notifier_register() must be called from non-atomic context. 3369 * Returns -EINVAL if called with null arguments, -ENOMEM upon 3370 * allocation failure; otherwise, passes along the return value of 3371 * srcu_notifier_chain_register(). 3372 */ 3373 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 3374 { 3375 struct clk_notifier *cn; 3376 int ret = -ENOMEM; 3377 3378 if (!clk || !nb) 3379 return -EINVAL; 3380 3381 clk_prepare_lock(); 3382 3383 /* search the list of notifiers for this clk */ 3384 list_for_each_entry(cn, &clk_notifier_list, node) 3385 if (cn->clk == clk) 3386 break; 3387 3388 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 3389 if (cn->clk != clk) { 3390 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 3391 if (!cn) 3392 goto out; 3393 3394 cn->clk = clk; 3395 srcu_init_notifier_head(&cn->notifier_head); 3396 3397 list_add(&cn->node, &clk_notifier_list); 3398 } 3399 3400 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 3401 3402 clk->core->notifier_count++; 3403 3404 out: 3405 clk_prepare_unlock(); 3406 3407 return ret; 3408 } 3409 EXPORT_SYMBOL_GPL(clk_notifier_register); 3410 3411 /** 3412 * clk_notifier_unregister - remove a clk rate change notifier 3413 * @clk: struct clk * 3414 * @nb: struct notifier_block * with callback info 3415 * 3416 * Request no further notification for changes to 'clk' and frees memory 3417 * allocated in clk_notifier_register. 3418 * 3419 * Returns -EINVAL if called with null arguments; otherwise, passes 3420 * along the return value of srcu_notifier_chain_unregister(). 3421 */ 3422 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 3423 { 3424 struct clk_notifier *cn = NULL; 3425 int ret = -EINVAL; 3426 3427 if (!clk || !nb) 3428 return -EINVAL; 3429 3430 clk_prepare_lock(); 3431 3432 list_for_each_entry(cn, &clk_notifier_list, node) 3433 if (cn->clk == clk) 3434 break; 3435 3436 if (cn->clk == clk) { 3437 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 3438 3439 clk->core->notifier_count--; 3440 3441 /* XXX the notifier code should handle this better */ 3442 if (!cn->notifier_head.head) { 3443 srcu_cleanup_notifier_head(&cn->notifier_head); 3444 list_del(&cn->node); 3445 kfree(cn); 3446 } 3447 3448 } else { 3449 ret = -ENOENT; 3450 } 3451 3452 clk_prepare_unlock(); 3453 3454 return ret; 3455 } 3456 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 3457 3458 #ifdef CONFIG_OF 3459 /** 3460 * struct of_clk_provider - Clock provider registration structure 3461 * @link: Entry in global list of clock providers 3462 * @node: Pointer to device tree node of clock provider 3463 * @get: Get clock callback. 
Returns NULL or a struct clk for the 3464 * given clock specifier 3465 * @data: context pointer to be passed into @get callback 3466 */ 3467 struct of_clk_provider { 3468 struct list_head link; 3469 3470 struct device_node *node; 3471 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 3472 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 3473 void *data; 3474 }; 3475 3476 static const struct of_device_id __clk_of_table_sentinel 3477 __used __section(__clk_of_table_end); 3478 3479 static LIST_HEAD(of_clk_providers); 3480 static DEFINE_MUTEX(of_clk_mutex); 3481 3482 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 3483 void *data) 3484 { 3485 return data; 3486 } 3487 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 3488 3489 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 3490 { 3491 return data; 3492 } 3493 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 3494 3495 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 3496 { 3497 struct clk_onecell_data *clk_data = data; 3498 unsigned int idx = clkspec->args[0]; 3499 3500 if (idx >= clk_data->clk_num) { 3501 pr_err("%s: invalid clock index %u\n", __func__, idx); 3502 return ERR_PTR(-EINVAL); 3503 } 3504 3505 return clk_data->clks[idx]; 3506 } 3507 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 3508 3509 struct clk_hw * 3510 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 3511 { 3512 struct clk_hw_onecell_data *hw_data = data; 3513 unsigned int idx = clkspec->args[0]; 3514 3515 if (idx >= hw_data->num) { 3516 pr_err("%s: invalid index %u\n", __func__, idx); 3517 return ERR_PTR(-EINVAL); 3518 } 3519 3520 return hw_data->hws[idx]; 3521 } 3522 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 3523 3524 /** 3525 * of_clk_add_provider() - Register a clock provider for a node 3526 * @np: Device node pointer associated with clock provider 3527 * @clk_src_get: callback for decoding clock 3528 * @data: context pointer for @clk_src_get callback. 3529 */ 3530 int of_clk_add_provider(struct device_node *np, 3531 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 3532 void *data), 3533 void *data) 3534 { 3535 struct of_clk_provider *cp; 3536 int ret; 3537 3538 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3539 if (!cp) 3540 return -ENOMEM; 3541 3542 cp->node = of_node_get(np); 3543 cp->data = data; 3544 cp->get = clk_src_get; 3545 3546 mutex_lock(&of_clk_mutex); 3547 list_add(&cp->link, &of_clk_providers); 3548 mutex_unlock(&of_clk_mutex); 3549 pr_debug("Added clock from %pOF\n", np); 3550 3551 ret = of_clk_set_defaults(np, true); 3552 if (ret < 0) 3553 of_clk_del_provider(np); 3554 3555 return ret; 3556 } 3557 EXPORT_SYMBOL_GPL(of_clk_add_provider); 3558 3559 /** 3560 * of_clk_add_hw_provider() - Register a clock provider for a node 3561 * @np: Device node pointer associated with clock provider 3562 * @get: callback for decoding clk_hw 3563 * @data: context pointer for @get callback. 
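 *
 * A provider-side sketch (hypothetical names; hw_data is assumed to be an
 * already-allocated struct clk_hw_onecell_data), reusing the stock
 * of_clk_hw_onecell_get() decoder defined above:
 *
 *   hw_data->num = FOO_NR_CLKS;
 *   hw_data->hws[FOO_PLL] = &foo_pll_hw;
 *   ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);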
3564 */ 3565 int of_clk_add_hw_provider(struct device_node *np, 3566 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3567 void *data), 3568 void *data) 3569 { 3570 struct of_clk_provider *cp; 3571 int ret; 3572 3573 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3574 if (!cp) 3575 return -ENOMEM; 3576 3577 cp->node = of_node_get(np); 3578 cp->data = data; 3579 cp->get_hw = get; 3580 3581 mutex_lock(&of_clk_mutex); 3582 list_add(&cp->link, &of_clk_providers); 3583 mutex_unlock(&of_clk_mutex); 3584 pr_debug("Added clk_hw provider from %pOF\n", np); 3585 3586 ret = of_clk_set_defaults(np, true); 3587 if (ret < 0) 3588 of_clk_del_provider(np); 3589 3590 return ret; 3591 } 3592 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 3593 3594 static void devm_of_clk_release_provider(struct device *dev, void *res) 3595 { 3596 of_clk_del_provider(*(struct device_node **)res); 3597 } 3598 3599 int devm_of_clk_add_hw_provider(struct device *dev, 3600 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3601 void *data), 3602 void *data) 3603 { 3604 struct device_node **ptr, *np; 3605 int ret; 3606 3607 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 3608 GFP_KERNEL); 3609 if (!ptr) 3610 return -ENOMEM; 3611 3612 np = dev->of_node; 3613 ret = of_clk_add_hw_provider(np, get, data); 3614 if (!ret) { 3615 *ptr = np; 3616 devres_add(dev, ptr); 3617 } else { 3618 devres_free(ptr); 3619 } 3620 3621 return ret; 3622 } 3623 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 3624 3625 /** 3626 * of_clk_del_provider() - Remove a previously registered clock provider 3627 * @np: Device node pointer associated with clock provider 3628 */ 3629 void of_clk_del_provider(struct device_node *np) 3630 { 3631 struct of_clk_provider *cp; 3632 3633 mutex_lock(&of_clk_mutex); 3634 list_for_each_entry(cp, &of_clk_providers, link) { 3635 if (cp->node == np) { 3636 list_del(&cp->link); 3637 of_node_put(cp->node); 3638 kfree(cp); 3639 break; 3640 } 3641 } 3642 mutex_unlock(&of_clk_mutex); 3643 } 3644 EXPORT_SYMBOL_GPL(of_clk_del_provider); 3645 3646 static int devm_clk_provider_match(struct device *dev, void *res, void *data) 3647 { 3648 struct device_node **np = res; 3649 3650 if (WARN_ON(!np || !*np)) 3651 return 0; 3652 3653 return *np == data; 3654 } 3655 3656 void devm_of_clk_del_provider(struct device *dev) 3657 { 3658 int ret; 3659 3660 ret = devres_release(dev, devm_of_clk_release_provider, 3661 devm_clk_provider_match, dev->of_node); 3662 3663 WARN_ON(ret); 3664 } 3665 EXPORT_SYMBOL(devm_of_clk_del_provider); 3666 3667 static struct clk_hw * 3668 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 3669 struct of_phandle_args *clkspec) 3670 { 3671 struct clk *clk; 3672 3673 if (provider->get_hw) 3674 return provider->get_hw(clkspec, provider->data); 3675 3676 clk = provider->get(clkspec, provider->data); 3677 if (IS_ERR(clk)) 3678 return ERR_CAST(clk); 3679 return __clk_get_hw(clk); 3680 } 3681 3682 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, 3683 const char *dev_id, const char *con_id) 3684 { 3685 struct of_clk_provider *provider; 3686 struct clk *clk = ERR_PTR(-EPROBE_DEFER); 3687 struct clk_hw *hw; 3688 3689 if (!clkspec) 3690 return ERR_PTR(-EINVAL); 3691 3692 /* Check if we have such a provider in our array */ 3693 mutex_lock(&of_clk_mutex); 3694 list_for_each_entry(provider, &of_clk_providers, link) { 3695 if (provider->node == clkspec->np) { 3696 hw = __of_clk_get_hw_from_provider(provider, clkspec); 3697 clk = __clk_create_clk(hw, dev_id, con_id); 3698 } 3699 3700 if 
(!IS_ERR(clk)) { 3701 if (!__clk_get(clk)) { 3702 __clk_free_clk(clk); 3703 clk = ERR_PTR(-ENOENT); 3704 } 3705 3706 break; 3707 } 3708 } 3709 mutex_unlock(&of_clk_mutex); 3710 3711 return clk; 3712 } 3713 3714 /** 3715 * of_clk_get_from_provider() - Lookup a clock from a clock provider 3716 * @clkspec: pointer to a clock specifier data structure 3717 * 3718 * This function looks up a struct clk from the registered list of clock 3719 * providers, an input is a clock specifier data structure as returned 3720 * from the of_parse_phandle_with_args() function call. 3721 */ 3722 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 3723 { 3724 return __of_clk_get_from_provider(clkspec, NULL, __func__); 3725 } 3726 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 3727 3728 /** 3729 * of_clk_get_parent_count() - Count the number of clocks a device node has 3730 * @np: device node to count 3731 * 3732 * Returns: The number of clocks that are possible parents of this node 3733 */ 3734 unsigned int of_clk_get_parent_count(struct device_node *np) 3735 { 3736 int count; 3737 3738 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 3739 if (count < 0) 3740 return 0; 3741 3742 return count; 3743 } 3744 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 3745 3746 const char *of_clk_get_parent_name(struct device_node *np, int index) 3747 { 3748 struct of_phandle_args clkspec; 3749 struct property *prop; 3750 const char *clk_name; 3751 const __be32 *vp; 3752 u32 pv; 3753 int rc; 3754 int count; 3755 struct clk *clk; 3756 3757 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 3758 &clkspec); 3759 if (rc) 3760 return NULL; 3761 3762 index = clkspec.args_count ? clkspec.args[0] : 0; 3763 count = 0; 3764 3765 /* if there is an indices property, use it to transfer the index 3766 * specified into an array offset for the clock-output-names property. 3767 */ 3768 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 3769 if (index == pv) { 3770 index = count; 3771 break; 3772 } 3773 count++; 3774 } 3775 /* We went off the end of 'clock-indices' without finding it */ 3776 if (prop && !vp) 3777 return NULL; 3778 3779 if (of_property_read_string_index(clkspec.np, "clock-output-names", 3780 index, 3781 &clk_name) < 0) { 3782 /* 3783 * Best effort to get the name if the clock has been 3784 * registered with the framework. If the clock isn't 3785 * registered, we return the node name as the name of 3786 * the clock as long as #clock-cells = 0. 3787 */ 3788 clk = of_clk_get_from_provider(&clkspec); 3789 if (IS_ERR(clk)) { 3790 if (clkspec.args_count == 0) 3791 clk_name = clkspec.np->name; 3792 else 3793 clk_name = NULL; 3794 } else { 3795 clk_name = __clk_get_name(clk); 3796 clk_put(clk); 3797 } 3798 } 3799 3800 3801 of_node_put(clkspec.np); 3802 return clk_name; 3803 } 3804 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 3805 3806 /** 3807 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 3808 * number of parents 3809 * @np: Device node pointer associated with clock provider 3810 * @parents: pointer to char array that hold the parents' names 3811 * @size: size of the @parents array 3812 * 3813 * Return: number of parents for the clock node. 
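 *
 * A typical provider-side sketch (the array size is a made-up example):
 *
 *   const char *parents[4];
 *   int num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));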
3814 */ 3815 int of_clk_parent_fill(struct device_node *np, const char **parents, 3816 unsigned int size) 3817 { 3818 unsigned int i = 0; 3819 3820 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 3821 i++; 3822 3823 return i; 3824 } 3825 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 3826 3827 struct clock_provider { 3828 void (*clk_init_cb)(struct device_node *); 3829 struct device_node *np; 3830 struct list_head node; 3831 }; 3832 3833 /* 3834 * This function looks at each parent clock of a node. If a parent clock 3835 * exists, it checks that the provider for that parent clock has already 3836 * been initialized; in that case the parent clock is ready. 3837 */ 3838 static int parent_ready(struct device_node *np) 3839 { 3840 int i = 0; 3841 3842 while (true) { 3843 struct clk *clk = of_clk_get(np, i); 3844 3845 /* this parent is ready, we can check the next one */ 3846 if (!IS_ERR(clk)) { 3847 clk_put(clk); 3848 i++; 3849 continue; 3850 } 3851 3852 /* at least one parent is not ready, we exit now */ 3853 if (PTR_ERR(clk) == -EPROBE_DEFER) 3854 return 0; 3855 3856 /* 3857 * Here we make the assumption that the device tree is 3858 * written correctly, so any other error means that there 3859 * are no more parents. As we did not exit earlier, the 3860 * previous parents are all ready. If there is no clock 3861 * parent at all, there is nothing to wait for, so we can 3862 * consider their absence as being ready. 3863 */ 3864 return 1; 3865 } 3866 } 3867 3868 /** 3869 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 3870 * @np: Device node pointer associated with clock provider 3871 * @index: clock index 3872 * @flags: pointer to top-level framework flags 3873 * 3874 * Detects if the clock-critical property exists and, if so, sets the 3875 * corresponding CLK_IS_CRITICAL flag. 3876 * 3877 * Do not use this function. It exists only for legacy Device Tree 3878 * bindings, such as the outdated one-clock-per-node style. 3879 * Those bindings typically put all clock data into .dts and the Linux 3880 * driver has no clock data, thus making it impossible to set this flag 3881 * correctly from the driver. Only those drivers may call 3882 * of_clk_detect_critical from their setup functions. 3883 * 3884 * Return: error code or zero on success 3885 */ 3886 int of_clk_detect_critical(struct device_node *np, 3887 int index, unsigned long *flags) 3888 { 3889 struct property *prop; 3890 const __be32 *cur; 3891 uint32_t idx; 3892 3893 if (!np || !flags) 3894 return -EINVAL; 3895 3896 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 3897 if (index == idx) 3898 *flags |= CLK_IS_CRITICAL; 3899 3900 return 0; 3901 } 3902 3903 /** 3904 * of_clk_init() - Scan and init clock providers from the DT 3905 * @matches: array of compatible values and init functions for providers. 3906 * 3907 * This function scans the device tree for matching clock providers 3908 * and calls their initialization functions, trying to follow the 3909 * dependencies between providers.
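 *
 * Providers normally reach this function via CLK_OF_DECLARE(); a sketch
 * (the "foo" names, including foo_register_clocks(), are hypothetical):
 *
 *   static void __init foo_clk_setup(struct device_node *np)
 *   {
 *           foo_register_clocks(np);
 *   }
 *   CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_setup);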
3910 */ 3911 void __init of_clk_init(const struct of_device_id *matches) 3912 { 3913 const struct of_device_id *match; 3914 struct device_node *np; 3915 struct clock_provider *clk_provider, *next; 3916 bool is_init_done; 3917 bool force = false; 3918 LIST_HEAD(clk_provider_list); 3919 3920 if (!matches) 3921 matches = &__clk_of_table; 3922 3923 /* First prepare the list of the clocks providers */ 3924 for_each_matching_node_and_match(np, matches, &match) { 3925 struct clock_provider *parent; 3926 3927 if (!of_device_is_available(np)) 3928 continue; 3929 3930 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 3931 if (!parent) { 3932 list_for_each_entry_safe(clk_provider, next, 3933 &clk_provider_list, node) { 3934 list_del(&clk_provider->node); 3935 of_node_put(clk_provider->np); 3936 kfree(clk_provider); 3937 } 3938 of_node_put(np); 3939 return; 3940 } 3941 3942 parent->clk_init_cb = match->data; 3943 parent->np = of_node_get(np); 3944 list_add_tail(&parent->node, &clk_provider_list); 3945 } 3946 3947 while (!list_empty(&clk_provider_list)) { 3948 is_init_done = false; 3949 list_for_each_entry_safe(clk_provider, next, 3950 &clk_provider_list, node) { 3951 if (force || parent_ready(clk_provider->np)) { 3952 3953 /* Don't populate platform devices */ 3954 of_node_set_flag(clk_provider->np, 3955 OF_POPULATED); 3956 3957 clk_provider->clk_init_cb(clk_provider->np); 3958 of_clk_set_defaults(clk_provider->np, true); 3959 3960 list_del(&clk_provider->node); 3961 of_node_put(clk_provider->np); 3962 kfree(clk_provider); 3963 is_init_done = true; 3964 } 3965 } 3966 3967 /* 3968 * We didn't manage to initialize any of the 3969 * remaining providers during the last loop, so now we 3970 * initialize all the remaining ones unconditionally 3971 * in case the clock parent was not mandatory 3972 */ 3973 if (!is_init_done) 3974 force = true; 3975 } 3976 } 3977 #endif 3978