// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/* List of registered clks that use runtime PM */
static HLIST_HEAD(clk_rpm_list);
static DEFINE_MUTEX(clk_rpm_list_lock);

static const struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/*** private data structures ***/

struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct hlist_node	rpm_node;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return 0;

	return pm_runtime_resume_and_get(core->dev);
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/**
 * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
 *
 * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
 * that disabling unused clks avoids a deadlock where a device is runtime PM
 * resuming/suspending and the runtime PM callback is trying to grab the
 * prepare_lock for something like clk_prepare_enable() while
 * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
 * PM resume/suspend the device as well.
 *
 * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
 * success. Otherwise the lock is released on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int clk_pm_runtime_get_all(void)
{
	int ret;
	struct clk_core *core, *failed;

	/*
	 * Grab the list lock to prevent any new clks from being registered
	 * or unregistered until clk_pm_runtime_put_all().
	 */
	mutex_lock(&clk_rpm_list_lock);

	/*
	 * Runtime PM "get" all the devices that are needed for the clks
	 * currently registered. Do this without holding the prepare_lock, to
	 * avoid the deadlock.
	 */
	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
		ret = clk_pm_runtime_get(core);
		if (ret) {
			failed = core;
			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
			       dev_name(failed->dev), failed->name);
			goto err;
		}
	}

	return 0;

err:
	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
		if (core == failed)
			break;

		clk_pm_runtime_put(core);
	}
	mutex_unlock(&clk_rpm_list_lock);

	return ret;
}

/**
 * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
 *
 * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
 * the 'clk_rpm_list_lock'.
 */
static void clk_pm_runtime_put_all(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
		clk_pm_runtime_put(core);
	mutex_unlock(&clk_rpm_list_lock);
}

static void clk_pm_runtime_init(struct clk_core *core)
{
	struct device *dev = core->dev;

	if (dev && pm_runtime_enabled(dev)) {
		core->rpm_enabled = true;

		mutex_lock(&clk_rpm_list_lock);
		hlist_add_head(&core->rpm_node, &clk_rpm_list);
		mutex_unlock(&clk_rpm_list_lock);
	}
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	/*
	 * This could be called with the enable lock held, or from atomic
	 * context. If the parent isn't enabled already, we can't do
	 * anything here. We can also assume this clock isn't enabled.
	 */
	if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
		if (!clk_core_is_enabled(core->parent)) {
			ret = false;
			goto done;
		}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct device *clk_hw_get_dev(const struct clk_hw *hw)
{
	return hw->core->dev;
}
EXPORT_SYMBOL_GPL(clk_hw_get_dev);

struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
{
	return hw->core->of_node;
}
EXPORT_SYMBOL_GPL(clk_hw_get_of_node);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ?
		NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *	parent: clock-controller@f00abcd {
 *		reg = <0xf00abcd 0xabcd>;
 *		#clock-cells = <0>;
 *	};
 *
 *	clock-controller@c001 {
 *		reg = <0xc001 0xf00d>;
 *		clocks = <&parent>;
 *		clock-names = "xtal";
 *		#clock-cells = <1>;
 *	};
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
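 *
 * A provider referring to such an external parent typically describes it in
 * its clk_init_data::parent_data array. Illustrative sketch only; the entry
 * below is hypothetical:
 *
 *	static const struct clk_parent_data xtal_parent[] = {
 *		{ .fw_name = "xtal", .name = "xtal" },
 *	};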
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (!hw)
		return NULL;

	return hw->core;
}

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent;

	if (entry->hw) {
		parent = entry->hw->core;
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/*
	 * We have a direct reference but it isn't registered yet?
	 * Orphan it and let clk_reparent() update the orphan status
	 * when the parent is registered.
	 */
	if (!parent)
		parent = ERR_PTR(-EPROBE_DEFER);

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req,
				   unsigned long rate);

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req);

static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
{
	struct clk_core *tmp;
	unsigned int i;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent)
		return true;

	for (i = 0; i < core->num_parents; i++) {
		tmp = clk_core_get_parent_by_index(core, i);
		if (!tmp)
			continue;

		if (tmp == parent)
			return true;
	}

	return false;
}

static void
clk_core_forward_rate_req(struct clk_core *core,
			  const struct clk_rate_request *old_req,
			  struct clk_core *parent,
			  struct clk_rate_request *req,
			  unsigned long parent_rate)
{
	if (WARN_ON(!clk_core_has_parent(core, parent)))
		return;

	clk_core_init_rate_req(parent, req, parent_rate);

	if (req->min_rate < old_req->min_rate)
		req->min_rate = old_req->min_rate;

	if (req->max_rate > old_req->max_rate)
		req->max_rate = old_req->max_rate;
}

static int
clk_core_determine_rate_no_reparent(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_core *core = hw->core;
	struct clk_core *parent = core->parent;
	unsigned long best;
	int ret;

	if (core->flags & CLK_SET_RATE_PARENT) {
		struct clk_rate_request parent_req;

		if (!parent) {
			req->rate = 0;
			return 0;
		}

		clk_core_forward_rate_req(core, req, parent, &parent_req,
					  req->rate);

		trace_clk_rate_request_start(&parent_req);

		ret = clk_core_round_rate_nolock(parent, &parent_req);
		if (ret)
			return ret;

		trace_clk_rate_request_done(&parent_req);

		best = parent_req.rate;
	} else if (parent) {
		best = clk_core_get_rate_nolock(parent);
	} else {
		best = clk_core_get_rate_nolock(core);
	}

	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent,
			*best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT)
		return clk_core_determine_rate_no_reparent(hw, req);

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		unsigned long parent_rate;

		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			struct clk_rate_request parent_req;

			clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);

			trace_clk_rate_request_start(&parent_req);

			ret = clk_core_round_rate_nolock(parent, &parent_req);
			if (ret)
				continue;

			trace_clk_rate_request_done(&parent_req);

			parent_rate = parent_req.rate;
		} else {
			parent_rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

/*
 * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
 * @hw: the hw clk we want to get the range from
 * @min_rate: pointer to the variable that will hold the minimum
 * @max_rate: pointer to the variable that will hold the maximum
 *
 * Fills the @min_rate and @max_rate variables with the minimum and
 * maximum that clock can reach.
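 *
 * Illustrative provider-side use (sketch), e.g. inside a .determine_rate
 * implementation:
 *
 *	unsigned long min_rate, max_rate;
 *
 *	clk_hw_get_rate_range(hw, &min_rate, &max_rate);
 *	rate = clamp(rate, min_rate, max_rate);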
 */
void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
			   unsigned long *max_rate)
{
	clk_core_get_boundaries(hw->core, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);

static bool clk_core_check_boundaries(struct clk_core *core,
				      unsigned long min_rate,
				      unsigned long max_rate)
{
	struct clk *user;

	lockdep_assert_held(&prepare_lock);

	if (min_rate > core->max_rate || max_rate < core->min_rate)
		return false;

	hlist_for_each_entry(user, &core->clks, clks_node)
		if (min_rate > user->max_rate || max_rate < user->min_rate)
			return false;

	return true;
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, a negative errno on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*
 * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred frequency
 *
 * Helper for finding best parent rate to provide a given frequency.
 * This can be used directly as a determine_rate callback (e.g. for a
 * mux), or from a more complex clock that may combine a mux with other
 * operations.
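 *
 * Illustrative use (sketch): a provider that never switches parents can plug
 * this helper straight into its clk_ops; the other callbacks named below are
 * hypothetical:
 *
 *	static const struct clk_ops foo_div_ops = {
 *		.determine_rate = clk_hw_determine_rate_no_reparent,
 *		.recalc_rate = foo_div_recalc_rate,
 *		.set_rate = foo_div_set_rate,
 *	};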
 *
 * Returns: 0 on success, a negative errno on error
 */
int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	return clk_core_determine_rate_no_reparent(hw, req);
}
EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);

/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks.
 * As a result, clocks up the parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void devm_clk_rate_exclusive_put(void *data)
{
	struct clk *clk = data;

	clk_rate_exclusive_put(clk);
}

int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
}
EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
	clk_pm_runtime_put(core);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
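 *
 * Typical consumer-side pairing (illustrative): the fast, atomic gate first,
 * then the part that may sleep:
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);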
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.
 * One example is a SoC-internal clk which is controlled via simple register
 * writes. In the complex case a clk gate operation may require a fast and a
 * slow part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
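 *
 * Illustrative call site (sketch; the platform hook name is hypothetical):
 *
 *	static int foo_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}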
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
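 *
 * Illustrative check (sketch):
 *
 *	if (clk_is_enabled_when_prepared(clk))
 *		dev_dbg(dev, "clk_disable() alone will not gate this clk\n");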
 */
bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
}
EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;
	int ret;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	pr_info("clk: Disabling unused clocks\n");

	ret = clk_pm_runtime_get_all();
	if (ret)
		return ret;
	/*
	 * Grab the prepare lock to keep the clk topology stable while iterating
	 * over clks.
	 */
	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	clk_pm_runtime_put_all();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * Some clock providers hand-craft their clk_rate_requests and
	 * might not fill min_rate and max_rate.
	 *
	 * If it's the case, clamping the rate is equivalent to setting
	 * the rate to 0 which is bad. Skip the clamping but complain so
	 * that it gets fixed, hopefully.
	 */
	if (!req->min_rate && !req->max_rate)
		pr_warn("%s: %s: clk_rate_request has uninitialized min or max rate.\n",
			__func__, core->name);
	else
		req->rate = clamp(req->rate, req->min_rate, req->max_rate);

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req,
				   unsigned long rate)
{
	struct clk_core *parent;

	if (WARN_ON(!req))
		return;

	memset(req, 0, sizeof(*req));
	req->max_rate = ULONG_MAX;

	if (!core)
		return;

	req->core = core;
	req->rate = rate;
	clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

/**
 * clk_hw_init_rate_request - Initializes a clk_rate_request
 * @hw: the clk for which we want to submit a rate request
 * @req: the clk_rate_request structure we want to initialise
 * @rate: the rate which is to be requested
 *
 * Initializes a clk_rate_request structure to submit to
 * __clk_determine_rate() or similar functions.
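 *
 * Illustrative provider-side use (sketch; 'target_rate' is a placeholder):
 *
 *	struct clk_rate_request req;
 *
 *	clk_hw_init_rate_request(hw, &req, target_rate);
 *	ret = __clk_determine_rate(hw, &req);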
 */
void clk_hw_init_rate_request(const struct clk_hw *hw,
			      struct clk_rate_request *req,
			      unsigned long rate)
{
	if (WARN_ON(!hw || !req))
		return;

	clk_core_init_rate_req(hw->core, req, rate);
}
EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);

/**
 * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
 * @hw: the original clock that got the rate request
 * @old_req: the original clk_rate_request structure we want to forward
 * @parent: the clk we want to forward @old_req to
 * @req: the clk_rate_request structure we want to initialise
 * @parent_rate: The rate which is to be requested to @parent
 *
 * Initializes a clk_rate_request structure to submit to a clock parent
 * in __clk_determine_rate() or similar functions.
 */
void clk_hw_forward_rate_request(const struct clk_hw *hw,
				 const struct clk_rate_request *old_req,
				 const struct clk_hw *parent,
				 struct clk_rate_request *req,
				 unsigned long parent_rate)
{
	if (WARN_ON(!hw || !old_req || !parent || !req))
		return;

	clk_core_forward_rate_req(hw->core, old_req,
				  parent->core, req,
				  parent_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);

	if (core->flags & CLK_SET_RATE_PARENT) {
		struct clk_rate_request parent_req;

		clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);

		trace_clk_rate_request_start(&parent_req);

		ret = clk_core_round_rate_nolock(core->parent, &parent_req);
		if (ret)
			return ret;

		trace_clk_rate_request_done(&parent_req);

		req->best_parent_rate = parent_req.rate;
		req->rate = parent_req.rate;

		return 0;
	}

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * clk_hw_round_rate() - round the given rate for a hw clk
 * @hw: the hw clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use.
 *
 * Context: prepare_lock must be held.
 *          For clk providers to call from within clk_ops such as .round_rate,
 *          .determine_rate.
 *
 * Return: returns rounded rate of hw clk if clk supports round_rate operation
 *         else returns the parent rate.
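 *
 * Illustrative provider-side use (sketch), from a .round_rate or .set_rate
 * path of a divider-style clk ('div' is a hypothetical divider value):
 *
 *	parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * div);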
 */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_init_rate_req(hw->core, &req, rate);

	trace_clk_rate_request_start(&req);

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	trace_clk_rate_request_done(&req);

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_init_rate_req(clk->core, &req, rate);

	trace_clk_rate_request_start(&req);

	ret = clk_core_round_rate_nolock(clk->core, &req);

	trace_clk_rate_request_done(&req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
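 *
 * For example (illustrative), a gate clk with no .recalc_accuracy callback
 * whose parent reports an accuracy of 100 ppb simply inherits 100 ppb.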
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @update_req: Whether req_rate should be updated with the new rate
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, bool update_req,
			       unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);
	if (update_req)
		core->req_rate = core->rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, update_req, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, false, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued. Can be called regardless of
 * whether the clock is enabled. If clk is NULL, or if an error occurred, then
 * returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
2085 */ 2086 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 2087 { 2088 struct clk_core *child; 2089 2090 core->orphan = is_orphan; 2091 2092 hlist_for_each_entry(child, &core->children, child_node) 2093 clk_core_update_orphan_status(child, is_orphan); 2094 } 2095 2096 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 2097 { 2098 bool was_orphan = core->orphan; 2099 2100 hlist_del(&core->child_node); 2101 2102 if (new_parent) { 2103 bool becomes_orphan = new_parent->orphan; 2104 2105 /* avoid duplicate POST_RATE_CHANGE notifications */ 2106 if (new_parent->new_child == core) 2107 new_parent->new_child = NULL; 2108 2109 hlist_add_head(&core->child_node, &new_parent->children); 2110 2111 if (was_orphan != becomes_orphan) 2112 clk_core_update_orphan_status(core, becomes_orphan); 2113 } else { 2114 hlist_add_head(&core->child_node, &clk_orphan_list); 2115 if (!was_orphan) 2116 clk_core_update_orphan_status(core, true); 2117 } 2118 2119 core->parent = new_parent; 2120 } 2121 2122 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 2123 struct clk_core *parent) 2124 { 2125 unsigned long flags; 2126 struct clk_core *old_parent = core->parent; 2127 2128 /* 2129 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock 2130 * 2131 * 2. Migrate prepare state between parents and prevent race with 2132 * clk_enable(). 2133 * 2134 * If the clock is not prepared, then a race with 2135 * clk_enable/disable() is impossible since we already have the 2136 * prepare lock (future calls to clk_enable() need to be preceded by 2137 * a clk_prepare()). 2138 * 2139 * If the clock is prepared, migrate the prepared state to the new 2140 * parent and also protect against a race with clk_enable() by 2141 * forcing the clock and the new parent on. This ensures that all 2142 * future calls to clk_enable() are practically NOPs with respect to 2143 * hardware and software states. 2144 * 2145 * See also: Comment for clk_set_parent() below. 2146 */ 2147 2148 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 2149 if (core->flags & CLK_OPS_PARENT_ENABLE) { 2150 clk_core_prepare_enable(old_parent); 2151 clk_core_prepare_enable(parent); 2152 } 2153 2154 /* migrate prepare count if > 0 */ 2155 if (core->prepare_count) { 2156 clk_core_prepare_enable(parent); 2157 clk_core_enable_lock(core); 2158 } 2159 2160 /* update the clk tree topology */ 2161 flags = clk_enable_lock(); 2162 clk_reparent(core, parent); 2163 clk_enable_unlock(flags); 2164 2165 return old_parent; 2166 } 2167 2168 static void __clk_set_parent_after(struct clk_core *core, 2169 struct clk_core *parent, 2170 struct clk_core *old_parent) 2171 { 2172 /* 2173 * Finish the migration of prepare state and undo the changes done 2174 * for preventing a race with clk_enable(). 
2175 */ 2176 if (core->prepare_count) { 2177 clk_core_disable_lock(core); 2178 clk_core_disable_unprepare(old_parent); 2179 } 2180 2181 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 2182 if (core->flags & CLK_OPS_PARENT_ENABLE) { 2183 clk_core_disable_unprepare(parent); 2184 clk_core_disable_unprepare(old_parent); 2185 } 2186 } 2187 2188 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 2189 u8 p_index) 2190 { 2191 unsigned long flags; 2192 int ret = 0; 2193 struct clk_core *old_parent; 2194 2195 old_parent = __clk_set_parent_before(core, parent); 2196 2197 trace_clk_set_parent(core, parent); 2198 2199 /* change clock input source */ 2200 if (parent && core->ops->set_parent) 2201 ret = core->ops->set_parent(core->hw, p_index); 2202 2203 trace_clk_set_parent_complete(core, parent); 2204 2205 if (ret) { 2206 flags = clk_enable_lock(); 2207 clk_reparent(core, old_parent); 2208 clk_enable_unlock(flags); 2209 2210 __clk_set_parent_after(core, old_parent, parent); 2211 2212 return ret; 2213 } 2214 2215 __clk_set_parent_after(core, parent, old_parent); 2216 2217 return 0; 2218 } 2219 2220 /** 2221 * __clk_speculate_rates 2222 * @core: first clk in the subtree 2223 * @parent_rate: the "future" rate of clk's parent 2224 * 2225 * Walks the subtree of clks starting with clk, speculating rates as it 2226 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 2227 * 2228 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 2229 * pre-rate change notifications and returns early if no clks in the 2230 * subtree have subscribed to the notifications. Note that if a clk does not 2231 * implement the .recalc_rate callback then it is assumed that the clock will 2232 * take on the rate of its parent. 2233 */ 2234 static int __clk_speculate_rates(struct clk_core *core, 2235 unsigned long parent_rate) 2236 { 2237 struct clk_core *child; 2238 unsigned long new_rate; 2239 int ret = NOTIFY_DONE; 2240 2241 lockdep_assert_held(&prepare_lock); 2242 2243 new_rate = clk_recalc(core, parent_rate); 2244 2245 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 2246 if (core->notifier_count) 2247 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 2248 2249 if (ret & NOTIFY_STOP_MASK) { 2250 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 2251 __func__, core->name, ret); 2252 goto out; 2253 } 2254 2255 hlist_for_each_entry(child, &core->children, child_node) { 2256 ret = __clk_speculate_rates(child, new_rate); 2257 if (ret & NOTIFY_STOP_MASK) 2258 break; 2259 } 2260 2261 out: 2262 return ret; 2263 } 2264 2265 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 2266 struct clk_core *new_parent, u8 p_index) 2267 { 2268 struct clk_core *child; 2269 2270 core->new_rate = new_rate; 2271 core->new_parent = new_parent; 2272 core->new_parent_index = p_index; 2273 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 2274 core->new_child = NULL; 2275 if (new_parent && new_parent != core->parent) 2276 new_parent->new_child = core; 2277 2278 hlist_for_each_entry(child, &core->children, child_node) { 2279 child->new_rate = clk_recalc(child, new_rate); 2280 clk_calc_subtree(child, child->new_rate, NULL, 0); 2281 } 2282 } 2283 2284 /* 2285 * calculate the new rates returning the topmost clock that has to be 2286 * changed. 
2287 */ 2288 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 2289 unsigned long rate) 2290 { 2291 struct clk_core *top = core; 2292 struct clk_core *old_parent, *parent; 2293 unsigned long best_parent_rate = 0; 2294 unsigned long new_rate; 2295 unsigned long min_rate; 2296 unsigned long max_rate; 2297 int p_index = 0; 2298 int ret; 2299 2300 /* sanity */ 2301 if (IS_ERR_OR_NULL(core)) 2302 return NULL; 2303 2304 /* save parent rate, if it exists */ 2305 parent = old_parent = core->parent; 2306 if (parent) 2307 best_parent_rate = parent->rate; 2308 2309 clk_core_get_boundaries(core, &min_rate, &max_rate); 2310 2311 /* find the closest rate and parent clk/rate */ 2312 if (clk_core_can_round(core)) { 2313 struct clk_rate_request req; 2314 2315 clk_core_init_rate_req(core, &req, rate); 2316 2317 trace_clk_rate_request_start(&req); 2318 2319 ret = clk_core_determine_round_nolock(core, &req); 2320 if (ret < 0) 2321 return NULL; 2322 2323 trace_clk_rate_request_done(&req); 2324 2325 best_parent_rate = req.best_parent_rate; 2326 new_rate = req.rate; 2327 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; 2328 2329 if (new_rate < min_rate || new_rate > max_rate) 2330 return NULL; 2331 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 2332 /* pass-through clock without adjustable parent */ 2333 core->new_rate = core->rate; 2334 return NULL; 2335 } else { 2336 /* pass-through clock with adjustable parent */ 2337 top = clk_calc_new_rates(parent, rate); 2338 new_rate = parent->new_rate; 2339 goto out; 2340 } 2341 2342 /* some clocks must be gated to change parent */ 2343 if (parent != old_parent && 2344 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 2345 pr_debug("%s: %s not gated but wants to reparent\n", 2346 __func__, core->name); 2347 return NULL; 2348 } 2349 2350 /* try finding the new parent index */ 2351 if (parent && core->num_parents > 1) { 2352 p_index = clk_fetch_parent_index(core, parent); 2353 if (p_index < 0) { 2354 pr_debug("%s: clk %s can not be parent of clk %s\n", 2355 __func__, parent->name, core->name); 2356 return NULL; 2357 } 2358 } 2359 2360 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 2361 best_parent_rate != parent->rate) 2362 top = clk_calc_new_rates(parent, best_parent_rate); 2363 2364 out: 2365 clk_calc_subtree(core, new_rate, parent, p_index); 2366 2367 return top; 2368 } 2369 2370 /* 2371 * Notify about rate changes in a subtree. Always walk down the whole tree 2372 * so that in case of an error we can walk down the whole tree again and 2373 * abort the change. 
2374 */ 2375 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 2376 unsigned long event) 2377 { 2378 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 2379 int ret = NOTIFY_DONE; 2380 2381 if (core->rate == core->new_rate) 2382 return NULL; 2383 2384 if (core->notifier_count) { 2385 ret = __clk_notify(core, event, core->rate, core->new_rate); 2386 if (ret & NOTIFY_STOP_MASK) 2387 fail_clk = core; 2388 } 2389 2390 hlist_for_each_entry(child, &core->children, child_node) { 2391 /* Skip children who will be reparented to another clock */ 2392 if (child->new_parent && child->new_parent != core) 2393 continue; 2394 tmp_clk = clk_propagate_rate_change(child, event); 2395 if (tmp_clk) 2396 fail_clk = tmp_clk; 2397 } 2398 2399 /* handle the new child who might not be in core->children yet */ 2400 if (core->new_child) { 2401 tmp_clk = clk_propagate_rate_change(core->new_child, event); 2402 if (tmp_clk) 2403 fail_clk = tmp_clk; 2404 } 2405 2406 return fail_clk; 2407 } 2408 2409 /* 2410 * walk down a subtree and set the new rates notifying the rate 2411 * change on the way 2412 */ 2413 static void clk_change_rate(struct clk_core *core) 2414 { 2415 struct clk_core *child; 2416 struct hlist_node *tmp; 2417 unsigned long old_rate; 2418 unsigned long best_parent_rate = 0; 2419 bool skip_set_rate = false; 2420 struct clk_core *old_parent; 2421 struct clk_core *parent = NULL; 2422 2423 old_rate = core->rate; 2424 2425 if (core->new_parent) { 2426 parent = core->new_parent; 2427 best_parent_rate = core->new_parent->rate; 2428 } else if (core->parent) { 2429 parent = core->parent; 2430 best_parent_rate = core->parent->rate; 2431 } 2432 2433 if (clk_pm_runtime_get(core)) 2434 return; 2435 2436 if (core->flags & CLK_SET_RATE_UNGATE) { 2437 clk_core_prepare(core); 2438 clk_core_enable_lock(core); 2439 } 2440 2441 if (core->new_parent && core->new_parent != core->parent) { 2442 old_parent = __clk_set_parent_before(core, core->new_parent); 2443 trace_clk_set_parent(core, core->new_parent); 2444 2445 if (core->ops->set_rate_and_parent) { 2446 skip_set_rate = true; 2447 core->ops->set_rate_and_parent(core->hw, core->new_rate, 2448 best_parent_rate, 2449 core->new_parent_index); 2450 } else if (core->ops->set_parent) { 2451 core->ops->set_parent(core->hw, core->new_parent_index); 2452 } 2453 2454 trace_clk_set_parent_complete(core, core->new_parent); 2455 __clk_set_parent_after(core, core->new_parent, old_parent); 2456 } 2457 2458 if (core->flags & CLK_OPS_PARENT_ENABLE) 2459 clk_core_prepare_enable(parent); 2460 2461 trace_clk_set_rate(core, core->new_rate); 2462 2463 if (!skip_set_rate && core->ops->set_rate) 2464 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 2465 2466 trace_clk_set_rate_complete(core, core->new_rate); 2467 2468 core->rate = clk_recalc(core, best_parent_rate); 2469 2470 if (core->flags & CLK_SET_RATE_UNGATE) { 2471 clk_core_disable_lock(core); 2472 clk_core_unprepare(core); 2473 } 2474 2475 if (core->flags & CLK_OPS_PARENT_ENABLE) 2476 clk_core_disable_unprepare(parent); 2477 2478 if (core->notifier_count && old_rate != core->rate) 2479 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 2480 2481 if (core->flags & CLK_RECALC_NEW_RATES) 2482 (void)clk_calc_new_rates(core, core->new_rate); 2483 2484 /* 2485 * Use safe iteration, as change_rate can actually swap parents 2486 * for certain clock types. 
2487 */ 2488 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 2489 /* Skip children who will be reparented to another clock */ 2490 if (child->new_parent && child->new_parent != core) 2491 continue; 2492 clk_change_rate(child); 2493 } 2494 2495 /* handle the new child who might not be in core->children yet */ 2496 if (core->new_child) 2497 clk_change_rate(core->new_child); 2498 2499 clk_pm_runtime_put(core); 2500 } 2501 2502 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 2503 unsigned long req_rate) 2504 { 2505 int ret, cnt; 2506 struct clk_rate_request req; 2507 2508 lockdep_assert_held(&prepare_lock); 2509 2510 if (!core) 2511 return 0; 2512 2513 /* simulate what the rate would be if it could be freely set */ 2514 cnt = clk_core_rate_nuke_protect(core); 2515 if (cnt < 0) 2516 return cnt; 2517 2518 clk_core_init_rate_req(core, &req, req_rate); 2519 2520 trace_clk_rate_request_start(&req); 2521 2522 ret = clk_core_round_rate_nolock(core, &req); 2523 2524 trace_clk_rate_request_done(&req); 2525 2526 /* restore the protection */ 2527 clk_core_rate_restore_protect(core, cnt); 2528 2529 return ret ? 0 : req.rate; 2530 } 2531 2532 static int clk_core_set_rate_nolock(struct clk_core *core, 2533 unsigned long req_rate) 2534 { 2535 struct clk_core *top, *fail_clk; 2536 unsigned long rate; 2537 int ret; 2538 2539 if (!core) 2540 return 0; 2541 2542 rate = clk_core_req_round_rate_nolock(core, req_rate); 2543 2544 /* bail early if nothing to do */ 2545 if (rate == clk_core_get_rate_nolock(core)) 2546 return 0; 2547 2548 /* fail on a direct rate set of a protected provider */ 2549 if (clk_core_rate_is_protected(core)) 2550 return -EBUSY; 2551 2552 /* calculate new rates and get the topmost changed clock */ 2553 top = clk_calc_new_rates(core, req_rate); 2554 if (!top) 2555 return -EINVAL; 2556 2557 ret = clk_pm_runtime_get(core); 2558 if (ret) 2559 return ret; 2560 2561 /* notify that we are about to change rates */ 2562 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 2563 if (fail_clk) { 2564 pr_debug("%s: failed to set %s rate\n", __func__, 2565 fail_clk->name); 2566 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 2567 ret = -EBUSY; 2568 goto err; 2569 } 2570 2571 /* change the rates */ 2572 clk_change_rate(top); 2573 2574 core->req_rate = req_rate; 2575 err: 2576 clk_pm_runtime_put(core); 2577 2578 return ret; 2579 } 2580 2581 /** 2582 * clk_set_rate - specify a new rate for clk 2583 * @clk: the clk whose rate is being changed 2584 * @rate: the new rate for clk 2585 * 2586 * In the simplest case clk_set_rate will only adjust the rate of clk. 2587 * 2588 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 2589 * propagate up to clk's parent; whether or not this happens depends on the 2590 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 2591 * after calling .round_rate then upstream parent propagation is ignored. If 2592 * *parent_rate comes back with a new rate for clk's parent then we propagate 2593 * up to clk's parent and set its rate. Upward propagation will continue 2594 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 2595 * .round_rate stops requesting changes to clk's parent_rate. 2596 * 2597 * Rate changes are accomplished via tree traversal that also recalculates the 2598 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 2599 * 2600 * Returns 0 on success, -EERROR otherwise. 
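 *
 * Example (an illustrative consumer sketch, not code from this file; "uart_clk"
 * is a hypothetical handle obtained earlier with clk_get() or devm_clk_get()):
 *
 *	int ret = clk_set_rate(uart_clk, 48000000);
 *
 *	if (ret)
 *		pr_err("failed to set uart_clk to 48 MHz: %d\n", ret);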
2601 */ 2602 int clk_set_rate(struct clk *clk, unsigned long rate) 2603 { 2604 int ret; 2605 2606 if (!clk) 2607 return 0; 2608 2609 /* prevent racing with updates to the clock topology */ 2610 clk_prepare_lock(); 2611 2612 if (clk->exclusive_count) 2613 clk_core_rate_unprotect(clk->core); 2614 2615 ret = clk_core_set_rate_nolock(clk->core, rate); 2616 2617 if (clk->exclusive_count) 2618 clk_core_rate_protect(clk->core); 2619 2620 clk_prepare_unlock(); 2621 2622 return ret; 2623 } 2624 EXPORT_SYMBOL_GPL(clk_set_rate); 2625 2626 /** 2627 * clk_set_rate_exclusive - specify a new rate and get exclusive control 2628 * @clk: the clk whose rate is being changed 2629 * @rate: the new rate for clk 2630 * 2631 * This is a combination of clk_set_rate() and clk_rate_exclusive_get() 2632 * within a critical section 2633 * 2634 * This can be used initially to ensure that at least 1 consumer is 2635 * satisfied when several consumers are competing for exclusivity over the 2636 * same clock provider. 2637 * 2638 * The exclusivity is not applied if setting the rate failed. 2639 * 2640 * Calls to clk_rate_exclusive_get() should be balanced with calls to 2641 * clk_rate_exclusive_put(). 2642 * 2643 * Returns 0 on success, -EERROR otherwise. 2644 */ 2645 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 2646 { 2647 int ret; 2648 2649 if (!clk) 2650 return 0; 2651 2652 /* prevent racing with updates to the clock topology */ 2653 clk_prepare_lock(); 2654 2655 /* 2656 * The temporary protection removal is not here, on purpose 2657 * This function is meant to be used instead of clk_rate_protect, 2658 * so before the consumer code path protect the clock provider 2659 */ 2660 2661 ret = clk_core_set_rate_nolock(clk->core, rate); 2662 if (!ret) { 2663 clk_core_rate_protect(clk->core); 2664 clk->exclusive_count++; 2665 } 2666 2667 clk_prepare_unlock(); 2668 2669 return ret; 2670 } 2671 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); 2672 2673 static int clk_set_rate_range_nolock(struct clk *clk, 2674 unsigned long min, 2675 unsigned long max) 2676 { 2677 int ret = 0; 2678 unsigned long old_min, old_max, rate; 2679 2680 lockdep_assert_held(&prepare_lock); 2681 2682 if (!clk) 2683 return 0; 2684 2685 trace_clk_set_rate_range(clk->core, min, max); 2686 2687 if (min > max) { 2688 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 2689 __func__, clk->core->name, clk->dev_id, clk->con_id, 2690 min, max); 2691 return -EINVAL; 2692 } 2693 2694 if (clk->exclusive_count) 2695 clk_core_rate_unprotect(clk->core); 2696 2697 /* Save the current values in case we need to rollback the change */ 2698 old_min = clk->min_rate; 2699 old_max = clk->max_rate; 2700 clk->min_rate = min; 2701 clk->max_rate = max; 2702 2703 if (!clk_core_check_boundaries(clk->core, min, max)) { 2704 ret = -EINVAL; 2705 goto out; 2706 } 2707 2708 rate = clk->core->req_rate; 2709 if (clk->core->flags & CLK_GET_RATE_NOCACHE) 2710 rate = clk_core_get_rate_recalc(clk->core); 2711 2712 /* 2713 * Since the boundaries have been changed, let's give the 2714 * opportunity to the provider to adjust the clock rate based on 2715 * the new boundaries. 2716 * 2717 * We also need to handle the case where the clock is currently 2718 * outside of the boundaries. Clamping the last requested rate 2719 * to the current minimum and maximum will also handle this. 2720 * 2721 * FIXME: 2722 * There is a catch. 
It may fail for the usual reason (clock 2723 * broken, clock protected, etc) but also because: 2724 * - round_rate() was not favorable and fell on the wrong 2725 * side of the boundary 2726 * - the determine_rate() callback does not really check for 2727 * this corner case when determining the rate 2728 */ 2729 rate = clamp(rate, min, max); 2730 ret = clk_core_set_rate_nolock(clk->core, rate); 2731 if (ret) { 2732 /* rollback the changes */ 2733 clk->min_rate = old_min; 2734 clk->max_rate = old_max; 2735 } 2736 2737 out: 2738 if (clk->exclusive_count) 2739 clk_core_rate_protect(clk->core); 2740 2741 return ret; 2742 } 2743 2744 /** 2745 * clk_set_rate_range - set a rate range for a clock source 2746 * @clk: clock source 2747 * @min: desired minimum clock rate in Hz, inclusive 2748 * @max: desired maximum clock rate in Hz, inclusive 2749 * 2750 * Return: 0 for success or negative errno on failure. 2751 */ 2752 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 2753 { 2754 int ret; 2755 2756 if (!clk) 2757 return 0; 2758 2759 clk_prepare_lock(); 2760 2761 ret = clk_set_rate_range_nolock(clk, min, max); 2762 2763 clk_prepare_unlock(); 2764 2765 return ret; 2766 } 2767 EXPORT_SYMBOL_GPL(clk_set_rate_range); 2768 2769 /** 2770 * clk_set_min_rate - set a minimum clock rate for a clock source 2771 * @clk: clock source 2772 * @rate: desired minimum clock rate in Hz, inclusive 2773 * 2774 * Returns success (0) or negative errno. 2775 */ 2776 int clk_set_min_rate(struct clk *clk, unsigned long rate) 2777 { 2778 if (!clk) 2779 return 0; 2780 2781 trace_clk_set_min_rate(clk->core, rate); 2782 2783 return clk_set_rate_range(clk, rate, clk->max_rate); 2784 } 2785 EXPORT_SYMBOL_GPL(clk_set_min_rate); 2786 2787 /** 2788 * clk_set_max_rate - set a maximum clock rate for a clock source 2789 * @clk: clock source 2790 * @rate: desired maximum clock rate in Hz, inclusive 2791 * 2792 * Returns success (0) or negative errno. 2793 */ 2794 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2795 { 2796 if (!clk) 2797 return 0; 2798 2799 trace_clk_set_max_rate(clk->core, rate); 2800 2801 return clk_set_rate_range(clk, clk->min_rate, rate); 2802 } 2803 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2804 2805 /** 2806 * clk_get_parent - return the parent of a clk 2807 * @clk: the clk whose parent gets returned 2808 * 2809 * Simply returns clk->parent. Returns NULL if clk is NULL. 2810 */ 2811 struct clk *clk_get_parent(struct clk *clk) 2812 { 2813 struct clk *parent; 2814 2815 if (!clk) 2816 return NULL; 2817 2818 clk_prepare_lock(); 2819 /* TODO: Create a per-user clk and change callers to call clk_put */ 2820 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2821 clk_prepare_unlock(); 2822 2823 return parent; 2824 } 2825 EXPORT_SYMBOL_GPL(clk_get_parent); 2826 2827 static struct clk_core *__clk_init_parent(struct clk_core *core) 2828 { 2829 u8 index = 0; 2830 2831 if (core->num_parents > 1 && core->ops->get_parent) 2832 index = core->ops->get_parent(core->hw); 2833 2834 return clk_core_get_parent_by_index(core, index); 2835 } 2836 2837 static void clk_core_reparent(struct clk_core *core, 2838 struct clk_core *new_parent) 2839 { 2840 clk_reparent(core, new_parent); 2841 __clk_recalc_accuracies(core); 2842 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2843 } 2844 2845 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2846 { 2847 if (!hw) 2848 return; 2849 2850 clk_core_reparent(hw->core, !new_parent ? 
NULL : new_parent->core); 2851 } 2852 2853 /** 2854 * clk_has_parent - check if a clock is a possible parent for another 2855 * @clk: clock source 2856 * @parent: parent clock source 2857 * 2858 * This function can be used in drivers that need to check that a clock can be 2859 * the parent of another without actually changing the parent. 2860 * 2861 * Returns true if @parent is a possible parent for @clk, false otherwise. 2862 */ 2863 bool clk_has_parent(const struct clk *clk, const struct clk *parent) 2864 { 2865 /* NULL clocks should be nops, so return success if either is NULL. */ 2866 if (!clk || !parent) 2867 return true; 2868 2869 return clk_core_has_parent(clk->core, parent->core); 2870 } 2871 EXPORT_SYMBOL_GPL(clk_has_parent); 2872 2873 static int clk_core_set_parent_nolock(struct clk_core *core, 2874 struct clk_core *parent) 2875 { 2876 int ret = 0; 2877 int p_index = 0; 2878 unsigned long p_rate = 0; 2879 2880 lockdep_assert_held(&prepare_lock); 2881 2882 if (!core) 2883 return 0; 2884 2885 if (core->parent == parent) 2886 return 0; 2887 2888 /* verify ops for multi-parent clks */ 2889 if (core->num_parents > 1 && !core->ops->set_parent) 2890 return -EPERM; 2891 2892 /* check that we are allowed to re-parent if the clock is in use */ 2893 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2894 return -EBUSY; 2895 2896 if (clk_core_rate_is_protected(core)) 2897 return -EBUSY; 2898 2899 /* try finding the new parent index */ 2900 if (parent) { 2901 p_index = clk_fetch_parent_index(core, parent); 2902 if (p_index < 0) { 2903 pr_debug("%s: clk %s can not be parent of clk %s\n", 2904 __func__, parent->name, core->name); 2905 return p_index; 2906 } 2907 p_rate = parent->rate; 2908 } 2909 2910 ret = clk_pm_runtime_get(core); 2911 if (ret) 2912 return ret; 2913 2914 /* propagate PRE_RATE_CHANGE notifications */ 2915 ret = __clk_speculate_rates(core, p_rate); 2916 2917 /* abort if a driver objects */ 2918 if (ret & NOTIFY_STOP_MASK) 2919 goto runtime_put; 2920 2921 /* do the re-parent */ 2922 ret = __clk_set_parent(core, parent, p_index); 2923 2924 /* propagate rate an accuracy recalculation accordingly */ 2925 if (ret) { 2926 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); 2927 } else { 2928 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2929 __clk_recalc_accuracies(core); 2930 } 2931 2932 runtime_put: 2933 clk_pm_runtime_put(core); 2934 2935 return ret; 2936 } 2937 2938 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) 2939 { 2940 return clk_core_set_parent_nolock(hw->core, parent->core); 2941 } 2942 EXPORT_SYMBOL_GPL(clk_hw_set_parent); 2943 2944 /** 2945 * clk_set_parent - switch the parent of a mux clk 2946 * @clk: the mux clk whose input we are switching 2947 * @parent: the new input to clk 2948 * 2949 * Re-parent clk to use parent as its new input source. If clk is in 2950 * prepared state, the clk will get enabled for the duration of this call. If 2951 * that's not acceptable for a specific clk (Eg: the consumer can't handle 2952 * that, the reparenting is glitchy in hardware, etc), use the 2953 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2954 * 2955 * After successfully changing clk's parent clk_set_parent will update the 2956 * clk topology, sysfs topology and propagate rate recalculation via 2957 * __clk_recalc_rates. 2958 * 2959 * Returns 0 on success, -EERROR otherwise. 
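 *
 * Example (an illustrative consumer sketch; "mux_clk" and "pll_clk" are
 * hypothetical handles for a mux clock and one of its candidate parents):
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);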
2960 */ 2961 int clk_set_parent(struct clk *clk, struct clk *parent) 2962 { 2963 int ret; 2964 2965 if (!clk) 2966 return 0; 2967 2968 clk_prepare_lock(); 2969 2970 if (clk->exclusive_count) 2971 clk_core_rate_unprotect(clk->core); 2972 2973 ret = clk_core_set_parent_nolock(clk->core, 2974 parent ? parent->core : NULL); 2975 2976 if (clk->exclusive_count) 2977 clk_core_rate_protect(clk->core); 2978 2979 clk_prepare_unlock(); 2980 2981 return ret; 2982 } 2983 EXPORT_SYMBOL_GPL(clk_set_parent); 2984 2985 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2986 { 2987 int ret = -EINVAL; 2988 2989 lockdep_assert_held(&prepare_lock); 2990 2991 if (!core) 2992 return 0; 2993 2994 if (clk_core_rate_is_protected(core)) 2995 return -EBUSY; 2996 2997 trace_clk_set_phase(core, degrees); 2998 2999 if (core->ops->set_phase) { 3000 ret = core->ops->set_phase(core->hw, degrees); 3001 if (!ret) 3002 core->phase = degrees; 3003 } 3004 3005 trace_clk_set_phase_complete(core, degrees); 3006 3007 return ret; 3008 } 3009 3010 /** 3011 * clk_set_phase - adjust the phase shift of a clock signal 3012 * @clk: clock signal source 3013 * @degrees: number of degrees the signal is shifted 3014 * 3015 * Shifts the phase of a clock signal by the specified 3016 * degrees. Returns 0 on success, -EERROR otherwise. 3017 * 3018 * This function makes no distinction about the input or reference 3019 * signal that we adjust the clock signal phase against. For example 3020 * phase locked-loop clock signal generators we may shift phase with 3021 * respect to feedback clock signal input, but for other cases the 3022 * clock phase may be shifted with respect to some other, unspecified 3023 * signal. 3024 * 3025 * Additionally the concept of phase shift does not propagate through 3026 * the clock tree hierarchy, which sets it apart from clock rates and 3027 * clock accuracy. A parent clock phase attribute does not have an 3028 * impact on the phase attribute of a child clock. 3029 */ 3030 int clk_set_phase(struct clk *clk, int degrees) 3031 { 3032 int ret; 3033 3034 if (!clk) 3035 return 0; 3036 3037 /* sanity check degrees */ 3038 degrees %= 360; 3039 if (degrees < 0) 3040 degrees += 360; 3041 3042 clk_prepare_lock(); 3043 3044 if (clk->exclusive_count) 3045 clk_core_rate_unprotect(clk->core); 3046 3047 ret = clk_core_set_phase_nolock(clk->core, degrees); 3048 3049 if (clk->exclusive_count) 3050 clk_core_rate_protect(clk->core); 3051 3052 clk_prepare_unlock(); 3053 3054 return ret; 3055 } 3056 EXPORT_SYMBOL_GPL(clk_set_phase); 3057 3058 static int clk_core_get_phase(struct clk_core *core) 3059 { 3060 int ret; 3061 3062 lockdep_assert_held(&prepare_lock); 3063 if (!core->ops->get_phase) 3064 return 0; 3065 3066 /* Always try to update cached phase if possible */ 3067 ret = core->ops->get_phase(core->hw); 3068 if (ret >= 0) 3069 core->phase = ret; 3070 3071 return ret; 3072 } 3073 3074 /** 3075 * clk_get_phase - return the phase shift of a clock signal 3076 * @clk: clock signal source 3077 * 3078 * Returns the phase shift of a clock node in degrees, otherwise returns 3079 * -EERROR. 
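 *
 * Example (an illustrative consumer sketch; "sclk" is a hypothetical handle
 * whose sampling edge was shifted earlier with clk_set_phase()):
 *
 *	int phase = clk_get_phase(sclk);
 *
 *	if (phase >= 0)
 *		pr_debug("sclk phase is %d degrees\n", phase);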
3080 */ 3081 int clk_get_phase(struct clk *clk) 3082 { 3083 int ret; 3084 3085 if (!clk) 3086 return 0; 3087 3088 clk_prepare_lock(); 3089 ret = clk_core_get_phase(clk->core); 3090 clk_prepare_unlock(); 3091 3092 return ret; 3093 } 3094 EXPORT_SYMBOL_GPL(clk_get_phase); 3095 3096 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 3097 { 3098 /* Assume a default value of 50% */ 3099 core->duty.num = 1; 3100 core->duty.den = 2; 3101 } 3102 3103 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 3104 3105 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 3106 { 3107 struct clk_duty *duty = &core->duty; 3108 int ret = 0; 3109 3110 if (!core->ops->get_duty_cycle) 3111 return clk_core_update_duty_cycle_parent_nolock(core); 3112 3113 ret = core->ops->get_duty_cycle(core->hw, duty); 3114 if (ret) 3115 goto reset; 3116 3117 /* Don't trust the clock provider too much */ 3118 if (duty->den == 0 || duty->num > duty->den) { 3119 ret = -EINVAL; 3120 goto reset; 3121 } 3122 3123 return 0; 3124 3125 reset: 3126 clk_core_reset_duty_cycle_nolock(core); 3127 return ret; 3128 } 3129 3130 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 3131 { 3132 int ret = 0; 3133 3134 if (core->parent && 3135 core->flags & CLK_DUTY_CYCLE_PARENT) { 3136 ret = clk_core_update_duty_cycle_nolock(core->parent); 3137 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 3138 } else { 3139 clk_core_reset_duty_cycle_nolock(core); 3140 } 3141 3142 return ret; 3143 } 3144 3145 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 3146 struct clk_duty *duty); 3147 3148 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 3149 struct clk_duty *duty) 3150 { 3151 int ret; 3152 3153 lockdep_assert_held(&prepare_lock); 3154 3155 if (clk_core_rate_is_protected(core)) 3156 return -EBUSY; 3157 3158 trace_clk_set_duty_cycle(core, duty); 3159 3160 if (!core->ops->set_duty_cycle) 3161 return clk_core_set_duty_cycle_parent_nolock(core, duty); 3162 3163 ret = core->ops->set_duty_cycle(core->hw, duty); 3164 if (!ret) 3165 memcpy(&core->duty, duty, sizeof(*duty)); 3166 3167 trace_clk_set_duty_cycle_complete(core, duty); 3168 3169 return ret; 3170 } 3171 3172 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 3173 struct clk_duty *duty) 3174 { 3175 int ret = 0; 3176 3177 if (core->parent && 3178 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 3179 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 3180 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 3181 } 3182 3183 return ret; 3184 } 3185 3186 /** 3187 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 3188 * @clk: clock signal source 3189 * @num: numerator of the duty cycle ratio to be applied 3190 * @den: denominator of the duty cycle ratio to be applied 3191 * 3192 * Apply the duty cycle ratio if the ratio is valid and the clock can 3193 * perform this operation 3194 * 3195 * Returns (0) on success, a negative errno otherwise. 
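 *
 * Example (an illustrative consumer sketch; "pwm_clk" is a hypothetical handle
 * and the requested 1/4 ratio corresponds to a 25% duty cycle):
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 4);
 *	if (!ret)
 *		percent = clk_get_scaled_duty_cycle(pwm_clk, 100);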
3196 */ 3197 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 3198 { 3199 int ret; 3200 struct clk_duty duty; 3201 3202 if (!clk) 3203 return 0; 3204 3205 /* sanity check the ratio */ 3206 if (den == 0 || num > den) 3207 return -EINVAL; 3208 3209 duty.num = num; 3210 duty.den = den; 3211 3212 clk_prepare_lock(); 3213 3214 if (clk->exclusive_count) 3215 clk_core_rate_unprotect(clk->core); 3216 3217 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 3218 3219 if (clk->exclusive_count) 3220 clk_core_rate_protect(clk->core); 3221 3222 clk_prepare_unlock(); 3223 3224 return ret; 3225 } 3226 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 3227 3228 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 3229 unsigned int scale) 3230 { 3231 struct clk_duty *duty = &core->duty; 3232 int ret; 3233 3234 clk_prepare_lock(); 3235 3236 ret = clk_core_update_duty_cycle_nolock(core); 3237 if (!ret) 3238 ret = mult_frac(scale, duty->num, duty->den); 3239 3240 clk_prepare_unlock(); 3241 3242 return ret; 3243 } 3244 3245 /** 3246 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 3247 * @clk: clock signal source 3248 * @scale: scaling factor to be applied to represent the ratio as an integer 3249 * 3250 * Returns the duty cycle ratio of a clock node multiplied by the provided 3251 * scaling factor, or negative errno on error. 3252 */ 3253 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 3254 { 3255 if (!clk) 3256 return 0; 3257 3258 return clk_core_get_scaled_duty_cycle(clk->core, scale); 3259 } 3260 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 3261 3262 /** 3263 * clk_is_match - check if two clk's point to the same hardware clock 3264 * @p: clk compared against q 3265 * @q: clk compared against p 3266 * 3267 * Returns true if the two struct clk pointers both point to the same hardware 3268 * clock node. Put differently, returns true if struct clk *p and struct clk *q 3269 * share the same struct clk_core object. 3270 * 3271 * Returns false otherwise. Note that two NULL clks are treated as matching. 3272 */ 3273 bool clk_is_match(const struct clk *p, const struct clk *q) 3274 { 3275 /* trivial case: identical struct clk's or both NULL */ 3276 if (p == q) 3277 return true; 3278 3279 /* true if clk->core pointers match. 
Avoid dereferencing garbage */ 3280 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 3281 if (p->core == q->core) 3282 return true; 3283 3284 return false; 3285 } 3286 EXPORT_SYMBOL_GPL(clk_is_match); 3287 3288 /*** debugfs support ***/ 3289 3290 #ifdef CONFIG_DEBUG_FS 3291 #include <linux/debugfs.h> 3292 3293 static struct dentry *rootdir; 3294 static int inited = 0; 3295 static DEFINE_MUTEX(clk_debug_lock); 3296 static HLIST_HEAD(clk_debug_list); 3297 3298 static struct hlist_head *orphan_list[] = { 3299 &clk_orphan_list, 3300 NULL, 3301 }; 3302 3303 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 3304 int level) 3305 { 3306 int phase; 3307 struct clk *clk_user; 3308 int multi_node = 0; 3309 3310 seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ", 3311 level * 3 + 1, "", 3312 35 - level * 3, c->name, 3313 c->enable_count, c->prepare_count, c->protect_count, 3314 clk_core_get_rate_recalc(c), 3315 clk_core_get_accuracy_recalc(c)); 3316 3317 phase = clk_core_get_phase(c); 3318 if (phase >= 0) 3319 seq_printf(s, "%-5d", phase); 3320 else 3321 seq_puts(s, "-----"); 3322 3323 seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000)); 3324 3325 if (c->ops->is_enabled) 3326 seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N'); 3327 else if (!c->ops->enable) 3328 seq_printf(s, " %5c ", 'Y'); 3329 else 3330 seq_printf(s, " %5c ", '?'); 3331 3332 hlist_for_each_entry(clk_user, &c->clks, clks_node) { 3333 seq_printf(s, "%*s%-*s %-25s\n", 3334 level * 3 + 2 + 105 * multi_node, "", 3335 30, 3336 clk_user->dev_id ? clk_user->dev_id : "deviceless", 3337 clk_user->con_id ? clk_user->con_id : "no_connection_id"); 3338 3339 multi_node = 1; 3340 } 3341 3342 } 3343 3344 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 3345 int level) 3346 { 3347 struct clk_core *child; 3348 3349 clk_summary_show_one(s, c, level); 3350 3351 hlist_for_each_entry(child, &c->children, child_node) 3352 clk_summary_show_subtree(s, child, level + 1); 3353 } 3354 3355 static int clk_summary_show(struct seq_file *s, void *data) 3356 { 3357 struct clk_core *c; 3358 struct hlist_head **lists = s->private; 3359 int ret; 3360 3361 seq_puts(s, " enable prepare protect duty hardware connection\n"); 3362 seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n"); 3363 seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n"); 3364 3365 ret = clk_pm_runtime_get_all(); 3366 if (ret) 3367 return ret; 3368 3369 clk_prepare_lock(); 3370 3371 for (; *lists; lists++) 3372 hlist_for_each_entry(c, *lists, child_node) 3373 clk_summary_show_subtree(s, c, 0); 3374 3375 clk_prepare_unlock(); 3376 clk_pm_runtime_put_all(); 3377 3378 return 0; 3379 } 3380 DEFINE_SHOW_ATTRIBUTE(clk_summary); 3381 3382 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 3383 { 3384 int phase; 3385 unsigned long min_rate, max_rate; 3386 3387 clk_core_get_boundaries(c, &min_rate, &max_rate); 3388 3389 /* This should be JSON format, i.e. 
elements separated with a comma */ 3390 seq_printf(s, "\"%s\": { ", c->name); 3391 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 3392 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 3393 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 3394 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); 3395 seq_printf(s, "\"min_rate\": %lu,", min_rate); 3396 seq_printf(s, "\"max_rate\": %lu,", max_rate); 3397 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); 3398 phase = clk_core_get_phase(c); 3399 if (phase >= 0) 3400 seq_printf(s, "\"phase\": %d,", phase); 3401 seq_printf(s, "\"duty_cycle\": %u", 3402 clk_core_get_scaled_duty_cycle(c, 100000)); 3403 } 3404 3405 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 3406 { 3407 struct clk_core *child; 3408 3409 clk_dump_one(s, c, level); 3410 3411 hlist_for_each_entry(child, &c->children, child_node) { 3412 seq_putc(s, ','); 3413 clk_dump_subtree(s, child, level + 1); 3414 } 3415 3416 seq_putc(s, '}'); 3417 } 3418 3419 static int clk_dump_show(struct seq_file *s, void *data) 3420 { 3421 struct clk_core *c; 3422 bool first_node = true; 3423 struct hlist_head **lists = s->private; 3424 int ret; 3425 3426 ret = clk_pm_runtime_get_all(); 3427 if (ret) 3428 return ret; 3429 3430 seq_putc(s, '{'); 3431 3432 clk_prepare_lock(); 3433 3434 for (; *lists; lists++) { 3435 hlist_for_each_entry(c, *lists, child_node) { 3436 if (!first_node) 3437 seq_putc(s, ','); 3438 first_node = false; 3439 clk_dump_subtree(s, c, 0); 3440 } 3441 } 3442 3443 clk_prepare_unlock(); 3444 clk_pm_runtime_put_all(); 3445 3446 seq_puts(s, "}\n"); 3447 return 0; 3448 } 3449 DEFINE_SHOW_ATTRIBUTE(clk_dump); 3450 3451 #undef CLOCK_ALLOW_WRITE_DEBUGFS 3452 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3453 /* 3454 * This can be dangerous, therefore don't provide any real compile time 3455 * configuration option for this feature. 3456 * People who want to use this will need to modify the source code directly. 
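 * For example, turning the "#undef CLOCK_ALLOW_WRITE_DEBUGFS" just above into
 * a "#define" makes the clk_rate, clk_phase, clk_prepare_enable and clk_parent
 * debugfs files below writable.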
3457 */ 3458 static int clk_rate_set(void *data, u64 val) 3459 { 3460 struct clk_core *core = data; 3461 int ret; 3462 3463 clk_prepare_lock(); 3464 ret = clk_core_set_rate_nolock(core, val); 3465 clk_prepare_unlock(); 3466 3467 return ret; 3468 } 3469 3470 #define clk_rate_mode 0644 3471 3472 static int clk_phase_set(void *data, u64 val) 3473 { 3474 struct clk_core *core = data; 3475 int degrees = do_div(val, 360); 3476 int ret; 3477 3478 clk_prepare_lock(); 3479 ret = clk_core_set_phase_nolock(core, degrees); 3480 clk_prepare_unlock(); 3481 3482 return ret; 3483 } 3484 3485 #define clk_phase_mode 0644 3486 3487 static int clk_prepare_enable_set(void *data, u64 val) 3488 { 3489 struct clk_core *core = data; 3490 int ret = 0; 3491 3492 if (val) 3493 ret = clk_prepare_enable(core->hw->clk); 3494 else 3495 clk_disable_unprepare(core->hw->clk); 3496 3497 return ret; 3498 } 3499 3500 static int clk_prepare_enable_get(void *data, u64 *val) 3501 { 3502 struct clk_core *core = data; 3503 3504 *val = core->enable_count && core->prepare_count; 3505 return 0; 3506 } 3507 3508 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get, 3509 clk_prepare_enable_set, "%llu\n"); 3510 3511 #else 3512 #define clk_rate_set NULL 3513 #define clk_rate_mode 0444 3514 3515 #define clk_phase_set NULL 3516 #define clk_phase_mode 0644 3517 #endif 3518 3519 static int clk_rate_get(void *data, u64 *val) 3520 { 3521 struct clk_core *core = data; 3522 3523 clk_prepare_lock(); 3524 *val = clk_core_get_rate_recalc(core); 3525 clk_prepare_unlock(); 3526 3527 return 0; 3528 } 3529 3530 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n"); 3531 3532 static int clk_phase_get(void *data, u64 *val) 3533 { 3534 struct clk_core *core = data; 3535 3536 *val = core->phase; 3537 return 0; 3538 } 3539 3540 DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n"); 3541 3542 static const struct { 3543 unsigned long flag; 3544 const char *name; 3545 } clk_flags[] = { 3546 #define ENTRY(f) { f, #f } 3547 ENTRY(CLK_SET_RATE_GATE), 3548 ENTRY(CLK_SET_PARENT_GATE), 3549 ENTRY(CLK_SET_RATE_PARENT), 3550 ENTRY(CLK_IGNORE_UNUSED), 3551 ENTRY(CLK_GET_RATE_NOCACHE), 3552 ENTRY(CLK_SET_RATE_NO_REPARENT), 3553 ENTRY(CLK_GET_ACCURACY_NOCACHE), 3554 ENTRY(CLK_RECALC_NEW_RATES), 3555 ENTRY(CLK_SET_RATE_UNGATE), 3556 ENTRY(CLK_IS_CRITICAL), 3557 ENTRY(CLK_OPS_PARENT_ENABLE), 3558 ENTRY(CLK_DUTY_CYCLE_PARENT), 3559 #undef ENTRY 3560 }; 3561 3562 static int clk_flags_show(struct seq_file *s, void *data) 3563 { 3564 struct clk_core *core = s->private; 3565 unsigned long flags = core->flags; 3566 unsigned int i; 3567 3568 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 3569 if (flags & clk_flags[i].flag) { 3570 seq_printf(s, "%s\n", clk_flags[i].name); 3571 flags &= ~clk_flags[i].flag; 3572 } 3573 } 3574 if (flags) { 3575 /* Unknown flags */ 3576 seq_printf(s, "0x%lx\n", flags); 3577 } 3578 3579 return 0; 3580 } 3581 DEFINE_SHOW_ATTRIBUTE(clk_flags); 3582 3583 static void possible_parent_show(struct seq_file *s, struct clk_core *core, 3584 unsigned int i, char terminator) 3585 { 3586 struct clk_core *parent; 3587 const char *name = NULL; 3588 3589 /* 3590 * Go through the following options to fetch a parent's name. 3591 * 3592 * 1. Fetch the registered parent clock and use its name 3593 * 2. Use the global (fallback) name if specified 3594 * 3. Use the local fw_name if provided 3595 * 4. 
Fetch parent clock's clock-output-name if DT index was set 3596 * 3597 * This may still fail in some cases, such as when the parent is 3598 * specified directly via a struct clk_hw pointer, but it isn't 3599 * registered (yet). 3600 */ 3601 parent = clk_core_get_parent_by_index(core, i); 3602 if (parent) { 3603 seq_puts(s, parent->name); 3604 } else if (core->parents[i].name) { 3605 seq_puts(s, core->parents[i].name); 3606 } else if (core->parents[i].fw_name) { 3607 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); 3608 } else { 3609 if (core->parents[i].index >= 0) 3610 name = of_clk_get_parent_name(core->of_node, core->parents[i].index); 3611 if (!name) 3612 name = "(missing)"; 3613 3614 seq_puts(s, name); 3615 } 3616 3617 seq_putc(s, terminator); 3618 } 3619 3620 static int possible_parents_show(struct seq_file *s, void *data) 3621 { 3622 struct clk_core *core = s->private; 3623 int i; 3624 3625 for (i = 0; i < core->num_parents - 1; i++) 3626 possible_parent_show(s, core, i, ' '); 3627 3628 possible_parent_show(s, core, i, '\n'); 3629 3630 return 0; 3631 } 3632 DEFINE_SHOW_ATTRIBUTE(possible_parents); 3633 3634 static int current_parent_show(struct seq_file *s, void *data) 3635 { 3636 struct clk_core *core = s->private; 3637 3638 if (core->parent) 3639 seq_printf(s, "%s\n", core->parent->name); 3640 3641 return 0; 3642 } 3643 DEFINE_SHOW_ATTRIBUTE(current_parent); 3644 3645 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3646 static ssize_t current_parent_write(struct file *file, const char __user *ubuf, 3647 size_t count, loff_t *ppos) 3648 { 3649 struct seq_file *s = file->private_data; 3650 struct clk_core *core = s->private; 3651 struct clk_core *parent; 3652 u8 idx; 3653 int err; 3654 3655 err = kstrtou8_from_user(ubuf, count, 0, &idx); 3656 if (err < 0) 3657 return err; 3658 3659 parent = clk_core_get_parent_by_index(core, idx); 3660 if (!parent) 3661 return -ENOENT; 3662 3663 clk_prepare_lock(); 3664 err = clk_core_set_parent_nolock(core, parent); 3665 clk_prepare_unlock(); 3666 if (err) 3667 return err; 3668 3669 return count; 3670 } 3671 3672 static const struct file_operations current_parent_rw_fops = { 3673 .open = current_parent_open, 3674 .write = current_parent_write, 3675 .read = seq_read, 3676 .llseek = seq_lseek, 3677 .release = single_release, 3678 }; 3679 #endif 3680 3681 static int clk_duty_cycle_show(struct seq_file *s, void *data) 3682 { 3683 struct clk_core *core = s->private; 3684 struct clk_duty *duty = &core->duty; 3685 3686 seq_printf(s, "%u/%u\n", duty->num, duty->den); 3687 3688 return 0; 3689 } 3690 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 3691 3692 static int clk_min_rate_show(struct seq_file *s, void *data) 3693 { 3694 struct clk_core *core = s->private; 3695 unsigned long min_rate, max_rate; 3696 3697 clk_prepare_lock(); 3698 clk_core_get_boundaries(core, &min_rate, &max_rate); 3699 clk_prepare_unlock(); 3700 seq_printf(s, "%lu\n", min_rate); 3701 3702 return 0; 3703 } 3704 DEFINE_SHOW_ATTRIBUTE(clk_min_rate); 3705 3706 static int clk_max_rate_show(struct seq_file *s, void *data) 3707 { 3708 struct clk_core *core = s->private; 3709 unsigned long min_rate, max_rate; 3710 3711 clk_prepare_lock(); 3712 clk_core_get_boundaries(core, &min_rate, &max_rate); 3713 clk_prepare_unlock(); 3714 seq_printf(s, "%lu\n", max_rate); 3715 3716 return 0; 3717 } 3718 DEFINE_SHOW_ATTRIBUTE(clk_max_rate); 3719 3720 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 3721 { 3722 struct dentry *root; 3723 3724 if (!core || !pdentry) 3725 return; 3726 3727 root = 
debugfs_create_dir(core->name, pdentry); 3728 core->dentry = root; 3729 3730 debugfs_create_file("clk_rate", clk_rate_mode, root, core, 3731 &clk_rate_fops); 3732 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); 3733 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); 3734 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 3735 debugfs_create_file("clk_phase", clk_phase_mode, root, core, 3736 &clk_phase_fops); 3737 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 3738 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 3739 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 3740 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 3741 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 3742 debugfs_create_file("clk_duty_cycle", 0444, root, core, 3743 &clk_duty_cycle_fops); 3744 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3745 debugfs_create_file("clk_prepare_enable", 0644, root, core, 3746 &clk_prepare_enable_fops); 3747 3748 if (core->num_parents > 1) 3749 debugfs_create_file("clk_parent", 0644, root, core, 3750 &current_parent_rw_fops); 3751 else 3752 #endif 3753 if (core->num_parents > 0) 3754 debugfs_create_file("clk_parent", 0444, root, core, 3755 &current_parent_fops); 3756 3757 if (core->num_parents > 1) 3758 debugfs_create_file("clk_possible_parents", 0444, root, core, 3759 &possible_parents_fops); 3760 3761 if (core->ops->debug_init) 3762 core->ops->debug_init(core->hw, core->dentry); 3763 } 3764 3765 /** 3766 * clk_debug_register - add a clk node to the debugfs clk directory 3767 * @core: the clk being added to the debugfs clk directory 3768 * 3769 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 3770 * initialized. Otherwise it bails out early since the debugfs clk directory 3771 * will be created lazily by clk_debug_init as part of a late_initcall. 3772 */ 3773 static void clk_debug_register(struct clk_core *core) 3774 { 3775 mutex_lock(&clk_debug_lock); 3776 hlist_add_head(&core->debug_node, &clk_debug_list); 3777 if (inited) 3778 clk_debug_create_one(core, rootdir); 3779 mutex_unlock(&clk_debug_lock); 3780 } 3781 3782 /** 3783 * clk_debug_unregister - remove a clk node from the debugfs clk directory 3784 * @core: the clk being removed from the debugfs clk directory 3785 * 3786 * Dynamically removes a clk and all its child nodes from the 3787 * debugfs clk directory if clk->dentry points to debugfs created by 3788 * clk_debug_register in __clk_core_init. 3789 */ 3790 static void clk_debug_unregister(struct clk_core *core) 3791 { 3792 mutex_lock(&clk_debug_lock); 3793 hlist_del_init(&core->debug_node); 3794 debugfs_remove_recursive(core->dentry); 3795 core->dentry = NULL; 3796 mutex_unlock(&clk_debug_lock); 3797 } 3798 3799 /** 3800 * clk_debug_init - lazily populate the debugfs clk directory 3801 * 3802 * clks are often initialized very early during boot before memory can be 3803 * dynamically allocated and well before debugfs is set up. This function 3804 * populates the debugfs clk directory once at boot-time when we know that 3805 * debugfs is set up. It should only be called once at boot-time; all other clks 3806 * added dynamically will be registered with clk_debug_register.
3807 */ 3808 static int __init clk_debug_init(void) 3809 { 3810 struct clk_core *core; 3811 3812 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3813 pr_warn("\n"); 3814 pr_warn("********************************************************************\n"); 3815 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3816 pr_warn("** **\n"); 3817 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); 3818 pr_warn("** **\n"); 3819 pr_warn("** This means that this kernel is built to expose clk operations **\n"); 3820 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); 3821 pr_warn("** to userspace, which may compromise security on your system. **\n"); 3822 pr_warn("** **\n"); 3823 pr_warn("** If you see this message and you are not debugging the **\n"); 3824 pr_warn("** kernel, report this immediately to your vendor! **\n"); 3825 pr_warn("** **\n"); 3826 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3827 pr_warn("********************************************************************\n"); 3828 #endif 3829 3830 rootdir = debugfs_create_dir("clk", NULL); 3831 3832 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 3833 &clk_summary_fops); 3834 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 3835 &clk_dump_fops); 3836 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 3837 &clk_summary_fops); 3838 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 3839 &clk_dump_fops); 3840 3841 mutex_lock(&clk_debug_lock); 3842 hlist_for_each_entry(core, &clk_debug_list, debug_node) 3843 clk_debug_create_one(core, rootdir); 3844 3845 inited = 1; 3846 mutex_unlock(&clk_debug_lock); 3847 3848 return 0; 3849 } 3850 late_initcall(clk_debug_init); 3851 #else 3852 static inline void clk_debug_register(struct clk_core *core) { } 3853 static inline void clk_debug_unregister(struct clk_core *core) 3854 { 3855 } 3856 #endif 3857 3858 static void clk_core_reparent_orphans_nolock(void) 3859 { 3860 struct clk_core *orphan; 3861 struct hlist_node *tmp2; 3862 3863 /* 3864 * walk the list of orphan clocks and reparent any that newly finds a 3865 * parent. 3866 */ 3867 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3868 struct clk_core *parent = __clk_init_parent(orphan); 3869 3870 /* 3871 * We need to use __clk_set_parent_before() and _after() to 3872 * properly migrate any prepare/enable count of the orphan 3873 * clock. This is important for CLK_IS_CRITICAL clocks, which 3874 * are enabled during init but might not have a parent yet. 3875 */ 3876 if (parent) { 3877 /* update the clk tree topology */ 3878 __clk_set_parent_before(orphan, parent); 3879 __clk_set_parent_after(orphan, parent, NULL); 3880 __clk_recalc_accuracies(orphan); 3881 __clk_recalc_rates(orphan, true, 0); 3882 3883 /* 3884 * __clk_init_parent() will set the initial req_rate to 3885 * 0 if the clock doesn't have clk_ops::recalc_rate and 3886 * is an orphan when it's registered. 3887 * 3888 * 'req_rate' is used by clk_set_rate_range() and 3889 * clk_put() to trigger a clk_set_rate() call whenever 3890 * the boundaries are modified. Let's make sure 3891 * 'req_rate' is set to something non-zero so that 3892 * clk_set_rate_range() doesn't drop the frequency. 
3893 */ 3894 orphan->req_rate = orphan->rate; 3895 } 3896 } 3897 } 3898 3899 /** 3900 * __clk_core_init - initialize the data structures in a struct clk_core 3901 * @core: clk_core being initialized 3902 * 3903 * Initializes the lists in struct clk_core, queries the hardware for the 3904 * parent and rate and sets them both. 3905 */ 3906 static int __clk_core_init(struct clk_core *core) 3907 { 3908 int ret; 3909 struct clk_core *parent; 3910 unsigned long rate; 3911 int phase; 3912 3913 clk_prepare_lock(); 3914 3915 /* 3916 * Set hw->core after grabbing the prepare_lock to synchronize with 3917 * callers of clk_core_fill_parent_index() where we treat hw->core 3918 * being NULL as the clk not being registered yet. This is crucial so 3919 * that clks aren't parented until their parent is fully registered. 3920 */ 3921 core->hw->core = core; 3922 3923 ret = clk_pm_runtime_get(core); 3924 if (ret) 3925 goto unlock; 3926 3927 /* check to see if a clock with this name is already registered */ 3928 if (clk_core_lookup(core->name)) { 3929 pr_debug("%s: clk %s already initialized\n", 3930 __func__, core->name); 3931 ret = -EEXIST; 3932 goto out; 3933 } 3934 3935 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 3936 if (core->ops->set_rate && 3937 !((core->ops->round_rate || core->ops->determine_rate) && 3938 core->ops->recalc_rate)) { 3939 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 3940 __func__, core->name); 3941 ret = -EINVAL; 3942 goto out; 3943 } 3944 3945 if (core->ops->set_parent && !core->ops->get_parent) { 3946 pr_err("%s: %s must implement .get_parent & .set_parent\n", 3947 __func__, core->name); 3948 ret = -EINVAL; 3949 goto out; 3950 } 3951 3952 if (core->ops->set_parent && !core->ops->determine_rate) { 3953 pr_err("%s: %s must implement .set_parent & .determine_rate\n", 3954 __func__, core->name); 3955 ret = -EINVAL; 3956 goto out; 3957 } 3958 3959 if (core->num_parents > 1 && !core->ops->get_parent) { 3960 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 3961 __func__, core->name); 3962 ret = -EINVAL; 3963 goto out; 3964 } 3965 3966 if (core->ops->set_rate_and_parent && 3967 !(core->ops->set_parent && core->ops->set_rate)) { 3968 pr_err("%s: %s must implement .set_parent & .set_rate\n", 3969 __func__, core->name); 3970 ret = -EINVAL; 3971 goto out; 3972 } 3973 3974 /* 3975 * optional platform-specific magic 3976 * 3977 * The .init callback is not used by any of the basic clock types, but 3978 * exists for weird hardware that must perform initialization magic for 3979 * CCF to get an accurate view of clock for any other callbacks. It may 3980 * also be used when the clock needs to perform dynamic allocations. Such allocations 3981 * must be freed in the terminate() callback. 3982 * This callback shall not be used to initialize the parameters of the clock, 3983 * such as rate, parent, etc. 3984 * 3985 * If it exists, this callback should be called before any other callback of 3986 * the clock. 3987 */ 3988 if (core->ops->init) { 3989 ret = core->ops->init(core->hw); 3990 if (ret) 3991 goto out; 3992 } 3993 3994 parent = core->parent = __clk_init_parent(core); 3995 3996 /* 3997 * Populate core->parent if parent has already been clk_core_init'd. If 3998 * parent has not yet been clk_core_init'd then place clk in the orphan 3999 * list. If clk doesn't have any parents then place it in the root 4000 * clk list.
4001 * 4002 * Every time a new clk is clk_init'd then we walk the list of orphan 4003 * clocks and re-parent any that are children of the clock currently 4004 * being clk_init'd. 4005 */ 4006 if (parent) { 4007 hlist_add_head(&core->child_node, &parent->children); 4008 core->orphan = parent->orphan; 4009 } else if (!core->num_parents) { 4010 hlist_add_head(&core->child_node, &clk_root_list); 4011 core->orphan = false; 4012 } else { 4013 hlist_add_head(&core->child_node, &clk_orphan_list); 4014 core->orphan = true; 4015 } 4016 4017 /* 4018 * Set clk's accuracy. The preferred method is to use 4019 * .recalc_accuracy. For simple clocks and lazy developers the default 4020 * fallback is to use the parent's accuracy. If a clock doesn't have a 4021 * parent (or is orphaned) then accuracy is set to zero (perfect 4022 * clock). 4023 */ 4024 if (core->ops->recalc_accuracy) 4025 core->accuracy = core->ops->recalc_accuracy(core->hw, 4026 clk_core_get_accuracy_no_lock(parent)); 4027 else if (parent) 4028 core->accuracy = parent->accuracy; 4029 else 4030 core->accuracy = 0; 4031 4032 /* 4033 * Set clk's phase by clk_core_get_phase() caching the phase. 4034 * Since a phase is by definition relative to its parent, just 4035 * query the current clock phase, or just assume it's in phase. 4036 */ 4037 phase = clk_core_get_phase(core); 4038 if (phase < 0) { 4039 ret = phase; 4040 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, 4041 core->name); 4042 goto out; 4043 } 4044 4045 /* 4046 * Set clk's duty cycle. 4047 */ 4048 clk_core_update_duty_cycle_nolock(core); 4049 4050 /* 4051 * Set clk's rate. The preferred method is to use .recalc_rate. For 4052 * simple clocks and lazy developers the default fallback is to use the 4053 * parent's rate. If a clock doesn't have a parent (or is orphaned) 4054 * then rate is set to zero. 
4055 */ 4056 if (core->ops->recalc_rate) 4057 rate = core->ops->recalc_rate(core->hw, 4058 clk_core_get_rate_nolock(parent)); 4059 else if (parent) 4060 rate = parent->rate; 4061 else 4062 rate = 0; 4063 core->rate = core->req_rate = rate; 4064 4065 /* 4066 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 4067 * don't get accidentally disabled when walking the orphan tree and 4068 * reparenting clocks 4069 */ 4070 if (core->flags & CLK_IS_CRITICAL) { 4071 ret = clk_core_prepare(core); 4072 if (ret) { 4073 pr_warn("%s: critical clk '%s' failed to prepare\n", 4074 __func__, core->name); 4075 goto out; 4076 } 4077 4078 ret = clk_core_enable_lock(core); 4079 if (ret) { 4080 pr_warn("%s: critical clk '%s' failed to enable\n", 4081 __func__, core->name); 4082 clk_core_unprepare(core); 4083 goto out; 4084 } 4085 } 4086 4087 clk_core_reparent_orphans_nolock(); 4088 out: 4089 clk_pm_runtime_put(core); 4090 unlock: 4091 if (ret) { 4092 hlist_del_init(&core->child_node); 4093 core->hw->core = NULL; 4094 } 4095 4096 clk_prepare_unlock(); 4097 4098 if (!ret) 4099 clk_debug_register(core); 4100 4101 return ret; 4102 } 4103 4104 /** 4105 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core 4106 * @core: clk to add consumer to 4107 * @clk: consumer to link to a clk 4108 */ 4109 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) 4110 { 4111 clk_prepare_lock(); 4112 hlist_add_head(&clk->clks_node, &core->clks); 4113 clk_prepare_unlock(); 4114 } 4115 4116 /** 4117 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core 4118 * @clk: consumer to unlink 4119 */ 4120 static void clk_core_unlink_consumer(struct clk *clk) 4121 { 4122 lockdep_assert_held(&prepare_lock); 4123 hlist_del(&clk->clks_node); 4124 } 4125 4126 /** 4127 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core 4128 * @core: clk to allocate a consumer for 4129 * @dev_id: string describing device name 4130 * @con_id: connection ID string on device 4131 * 4132 * Returns: clk consumer left unlinked from the consumer list 4133 */ 4134 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, 4135 const char *con_id) 4136 { 4137 struct clk *clk; 4138 4139 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 4140 if (!clk) 4141 return ERR_PTR(-ENOMEM); 4142 4143 clk->core = core; 4144 clk->dev_id = dev_id; 4145 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 4146 clk->max_rate = ULONG_MAX; 4147 4148 return clk; 4149 } 4150 4151 /** 4152 * free_clk - Free a clk consumer 4153 * @clk: clk consumer to free 4154 * 4155 * Note, this assumes the clk has been unlinked from the clk_core consumer 4156 * list. 4157 */ 4158 static void free_clk(struct clk *clk) 4159 { 4160 kfree_const(clk->con_id); 4161 kfree(clk); 4162 } 4163 4164 /** 4165 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given 4166 * a clk_hw 4167 * @dev: clk consumer device 4168 * @hw: clk_hw associated with the clk being consumed 4169 * @dev_id: string describing device name 4170 * @con_id: connection ID string on device 4171 * 4172 * This is the main function used to create a clk pointer for use by clk 4173 * consumers. It connects a consumer to the clk_core and clk_hw structures 4174 * used by the framework and clk provider respectively. 
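 *
 * A minimal sketch, for illustration only (the "foo" provider and its
 * consumer device are hypothetical and not taken from this file):
 *
 *   struct clk *clk;
 *
 *   clk = clk_hw_create_clk(consumer_dev, &foo->hw,
 *                           dev_name(consumer_dev), "baud");
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);
 *
 * The reference obtained this way must eventually be dropped with clk_put().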
4175 */ 4176 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, 4177 const char *dev_id, const char *con_id) 4178 { 4179 struct clk *clk; 4180 struct clk_core *core; 4181 4182 /* This is to allow this function to be chained to others */ 4183 if (IS_ERR_OR_NULL(hw)) 4184 return ERR_CAST(hw); 4185 4186 core = hw->core; 4187 clk = alloc_clk(core, dev_id, con_id); 4188 if (IS_ERR(clk)) 4189 return clk; 4190 clk->dev = dev; 4191 4192 if (!try_module_get(core->owner)) { 4193 free_clk(clk); 4194 return ERR_PTR(-ENOENT); 4195 } 4196 4197 kref_get(&core->ref); 4198 clk_core_link_consumer(core, clk); 4199 4200 return clk; 4201 } 4202 4203 /** 4204 * clk_hw_get_clk - get clk consumer given a clk_hw 4205 * @hw: clk_hw associated with the clk being consumed 4206 * @con_id: connection ID string on device 4207 * 4208 * Returns: new clk consumer 4209 * This is the function to be used by providers which need 4210 * to get a consumer clk and act on the clock element. 4211 * Calls to this function must be balanced with calls to clk_put(). 4212 */ 4213 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) 4214 { 4215 struct device *dev = hw->core->dev; 4216 const char *name = dev ? dev_name(dev) : NULL; 4217 4218 return clk_hw_create_clk(dev, hw, name, con_id); 4219 } 4220 EXPORT_SYMBOL(clk_hw_get_clk); 4221 4222 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) 4223 { 4224 const char *dst; 4225 4226 if (!src) { 4227 if (must_exist) 4228 return -EINVAL; 4229 return 0; 4230 } 4231 4232 *dst_p = dst = kstrdup_const(src, GFP_KERNEL); 4233 if (!dst) 4234 return -ENOMEM; 4235 4236 return 0; 4237 } 4238 4239 static int clk_core_populate_parent_map(struct clk_core *core, 4240 const struct clk_init_data *init) 4241 { 4242 u8 num_parents = init->num_parents; 4243 const char * const *parent_names = init->parent_names; 4244 const struct clk_hw **parent_hws = init->parent_hws; 4245 const struct clk_parent_data *parent_data = init->parent_data; 4246 int i, ret = 0; 4247 struct clk_parent_map *parents, *parent; 4248 4249 if (!num_parents) 4250 return 0; 4251 4252 /* 4253 * Avoid unnecessary string look-ups of clk_core's possible parents by 4254 * having a cache of names/clk_hw pointers to clk_core pointers.
4255 */ 4256 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); 4257 core->parents = parents; 4258 if (!parents) 4259 return -ENOMEM; 4260 4261 /* Copy everything over because it might be __initdata */ 4262 for (i = 0, parent = parents; i < num_parents; i++, parent++) { 4263 parent->index = -1; 4264 if (parent_names) { 4265 /* throw a WARN if any entries are NULL */ 4266 WARN(!parent_names[i], 4267 "%s: invalid NULL in %s's .parent_names\n", 4268 __func__, core->name); 4269 ret = clk_cpy_name(&parent->name, parent_names[i], 4270 true); 4271 } else if (parent_data) { 4272 parent->hw = parent_data[i].hw; 4273 parent->index = parent_data[i].index; 4274 ret = clk_cpy_name(&parent->fw_name, 4275 parent_data[i].fw_name, false); 4276 if (!ret) 4277 ret = clk_cpy_name(&parent->name, 4278 parent_data[i].name, 4279 false); 4280 } else if (parent_hws) { 4281 parent->hw = parent_hws[i]; 4282 } else { 4283 ret = -EINVAL; 4284 WARN(1, "Must specify parents if num_parents > 0\n"); 4285 } 4286 4287 if (ret) { 4288 do { 4289 kfree_const(parents[i].name); 4290 kfree_const(parents[i].fw_name); 4291 } while (--i >= 0); 4292 kfree(parents); 4293 4294 return ret; 4295 } 4296 } 4297 4298 return 0; 4299 } 4300 4301 static void clk_core_free_parent_map(struct clk_core *core) 4302 { 4303 int i = core->num_parents; 4304 4305 if (!core->num_parents) 4306 return; 4307 4308 while (--i >= 0) { 4309 kfree_const(core->parents[i].name); 4310 kfree_const(core->parents[i].fw_name); 4311 } 4312 4313 kfree(core->parents); 4314 } 4315 4316 /* Free memory allocated for a struct clk_core */ 4317 static void __clk_release(struct kref *ref) 4318 { 4319 struct clk_core *core = container_of(ref, struct clk_core, ref); 4320 4321 if (core->rpm_enabled) { 4322 mutex_lock(&clk_rpm_list_lock); 4323 hlist_del(&core->rpm_node); 4324 mutex_unlock(&clk_rpm_list_lock); 4325 } 4326 4327 clk_core_free_parent_map(core); 4328 kfree_const(core->name); 4329 kfree(core); 4330 } 4331 4332 static struct clk * 4333 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) 4334 { 4335 int ret; 4336 struct clk_core *core; 4337 const struct clk_init_data *init = hw->init; 4338 4339 /* 4340 * The init data is not supposed to be used outside of registration path. 4341 * Set it to NULL so that provider drivers can't use it either and so that 4342 * we catch use of hw->init early on in the core. 4343 */ 4344 hw->init = NULL; 4345 4346 core = kzalloc(sizeof(*core), GFP_KERNEL); 4347 if (!core) { 4348 ret = -ENOMEM; 4349 goto fail_out; 4350 } 4351 4352 kref_init(&core->ref); 4353 4354 core->name = kstrdup_const(init->name, GFP_KERNEL); 4355 if (!core->name) { 4356 ret = -ENOMEM; 4357 goto fail_name; 4358 } 4359 4360 if (WARN_ON(!init->ops)) { 4361 ret = -EINVAL; 4362 goto fail_ops; 4363 } 4364 core->ops = init->ops; 4365 4366 core->dev = dev; 4367 clk_pm_runtime_init(core); 4368 core->of_node = np; 4369 if (dev && dev->driver) 4370 core->owner = dev->driver->owner; 4371 core->hw = hw; 4372 core->flags = init->flags; 4373 core->num_parents = init->num_parents; 4374 core->min_rate = 0; 4375 core->max_rate = ULONG_MAX; 4376 4377 ret = clk_core_populate_parent_map(core, init); 4378 if (ret) 4379 goto fail_parents; 4380 4381 INIT_HLIST_HEAD(&core->clks); 4382 4383 /* 4384 * Don't call clk_hw_create_clk() here because that would pin the 4385 * provider module to itself and prevent it from ever being removed. 
4386 */ 4387 hw->clk = alloc_clk(core, NULL, NULL); 4388 if (IS_ERR(hw->clk)) { 4389 ret = PTR_ERR(hw->clk); 4390 goto fail_create_clk; 4391 } 4392 4393 clk_core_link_consumer(core, hw->clk); 4394 4395 ret = __clk_core_init(core); 4396 if (!ret) 4397 return hw->clk; 4398 4399 clk_prepare_lock(); 4400 clk_core_unlink_consumer(hw->clk); 4401 clk_prepare_unlock(); 4402 4403 free_clk(hw->clk); 4404 hw->clk = NULL; 4405 4406 fail_create_clk: 4407 fail_parents: 4408 fail_ops: 4409 fail_name: 4410 kref_put(&core->ref, __clk_release); 4411 fail_out: 4412 if (dev) { 4413 dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n", 4414 init->name, hw); 4415 } else { 4416 pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n", 4417 np, ERR_PTR(ret), init->name, hw); 4418 } 4419 return ERR_PTR(ret); 4420 } 4421 4422 /** 4423 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent 4424 * @dev: Device to get device node of 4425 * 4426 * Return: device node pointer of @dev, or the device node pointer of 4427 * @dev->parent if dev doesn't have a device node, or NULL if neither 4428 * @dev or @dev->parent have a device node. 4429 */ 4430 static struct device_node *dev_or_parent_of_node(struct device *dev) 4431 { 4432 struct device_node *np; 4433 4434 if (!dev) 4435 return NULL; 4436 4437 np = dev_of_node(dev); 4438 if (!np) 4439 np = dev_of_node(dev->parent); 4440 4441 return np; 4442 } 4443 4444 /** 4445 * clk_register - allocate a new clock, register it and return an opaque cookie 4446 * @dev: device that is registering this clock 4447 * @hw: link to hardware-specific clock data 4448 * 4449 * clk_register is the *deprecated* interface for populating the clock tree with 4450 * new clock nodes. Use clk_hw_register() instead. 4451 * 4452 * Returns: a pointer to the newly allocated struct clk which 4453 * cannot be dereferenced by driver code but may be used in conjunction with the 4454 * rest of the clock API. In the event of an error clk_register will return an 4455 * error code; drivers must test for an error code after calling clk_register. 4456 */ 4457 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 4458 { 4459 return __clk_register(dev, dev_or_parent_of_node(dev), hw); 4460 } 4461 EXPORT_SYMBOL_GPL(clk_register); 4462 4463 /** 4464 * clk_hw_register - register a clk_hw and return an error code 4465 * @dev: device that is registering this clock 4466 * @hw: link to hardware-specific clock data 4467 * 4468 * clk_hw_register is the primary interface for populating the clock tree with 4469 * new clock nodes. It returns an integer equal to zero indicating success or 4470 * less than zero indicating failure. Drivers must test for an error code after 4471 * calling clk_hw_register(). 4472 */ 4473 int clk_hw_register(struct device *dev, struct clk_hw *hw) 4474 { 4475 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev), 4476 hw)); 4477 } 4478 EXPORT_SYMBOL_GPL(clk_hw_register); 4479 4480 /* 4481 * of_clk_hw_register - register a clk_hw and return an error code 4482 * @node: device_node of device that is registering this clock 4483 * @hw: link to hardware-specific clock data 4484 * 4485 * of_clk_hw_register() is the primary interface for populating the clock tree 4486 * with new clock nodes when a struct device is not available, but a struct 4487 * device_node is. It returns an integer equal to zero indicating success or 4488 * less than zero indicating failure. Drivers must test for an error code after 4489 * calling of_clk_hw_register(). 
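 *
 * A hedged usage sketch (the "foo" names and ops are hypothetical, not part
 * of this file):
 *
 *   static struct clk_hw foo_hw = {
 *           .init = &(struct clk_init_data){
 *                   .name = "foo",
 *                   .ops = &foo_ops,
 *           },
 *   };
 *
 *   ret = of_clk_hw_register(np, &foo_hw);
 *   if (ret)
 *           return ret;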
4490 */ 4491 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) 4492 { 4493 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); 4494 } 4495 EXPORT_SYMBOL_GPL(of_clk_hw_register); 4496 4497 /* 4498 * Empty clk_ops for unregistered clocks. These are used temporarily 4499 * after clk_unregister() was called on a clock and until last clock 4500 * consumer calls clk_put() and the struct clk object is freed. 4501 */ 4502 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 4503 { 4504 return -ENXIO; 4505 } 4506 4507 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 4508 { 4509 WARN_ON_ONCE(1); 4510 } 4511 4512 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 4513 unsigned long parent_rate) 4514 { 4515 return -ENXIO; 4516 } 4517 4518 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 4519 { 4520 return -ENXIO; 4521 } 4522 4523 static int clk_nodrv_determine_rate(struct clk_hw *hw, 4524 struct clk_rate_request *req) 4525 { 4526 return -ENXIO; 4527 } 4528 4529 static const struct clk_ops clk_nodrv_ops = { 4530 .enable = clk_nodrv_prepare_enable, 4531 .disable = clk_nodrv_disable_unprepare, 4532 .prepare = clk_nodrv_prepare_enable, 4533 .unprepare = clk_nodrv_disable_unprepare, 4534 .determine_rate = clk_nodrv_determine_rate, 4535 .set_rate = clk_nodrv_set_rate, 4536 .set_parent = clk_nodrv_set_parent, 4537 }; 4538 4539 static void clk_core_evict_parent_cache_subtree(struct clk_core *root, 4540 const struct clk_core *target) 4541 { 4542 int i; 4543 struct clk_core *child; 4544 4545 for (i = 0; i < root->num_parents; i++) 4546 if (root->parents[i].core == target) 4547 root->parents[i].core = NULL; 4548 4549 hlist_for_each_entry(child, &root->children, child_node) 4550 clk_core_evict_parent_cache_subtree(child, target); 4551 } 4552 4553 /* Remove this clk from all parent caches */ 4554 static void clk_core_evict_parent_cache(struct clk_core *core) 4555 { 4556 const struct hlist_head **lists; 4557 struct clk_core *root; 4558 4559 lockdep_assert_held(&prepare_lock); 4560 4561 for (lists = all_lists; *lists; lists++) 4562 hlist_for_each_entry(root, *lists, child_node) 4563 clk_core_evict_parent_cache_subtree(root, core); 4564 4565 } 4566 4567 /** 4568 * clk_unregister - unregister a currently registered clock 4569 * @clk: clock to unregister 4570 */ 4571 void clk_unregister(struct clk *clk) 4572 { 4573 unsigned long flags; 4574 const struct clk_ops *ops; 4575 4576 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4577 return; 4578 4579 clk_debug_unregister(clk->core); 4580 4581 clk_prepare_lock(); 4582 4583 ops = clk->core->ops; 4584 if (ops == &clk_nodrv_ops) { 4585 pr_err("%s: unregistered clock: %s\n", __func__, 4586 clk->core->name); 4587 clk_prepare_unlock(); 4588 return; 4589 } 4590 /* 4591 * Assign empty clock ops for consumers that might still hold 4592 * a reference to this clock. 4593 */ 4594 flags = clk_enable_lock(); 4595 clk->core->ops = &clk_nodrv_ops; 4596 clk_enable_unlock(flags); 4597 4598 if (ops->terminate) 4599 ops->terminate(clk->core->hw); 4600 4601 if (!hlist_empty(&clk->core->children)) { 4602 struct clk_core *child; 4603 struct hlist_node *t; 4604 4605 /* Reparent all children to the orphan list. 
*/ 4606 hlist_for_each_entry_safe(child, t, &clk->core->children, 4607 child_node) 4608 clk_core_set_parent_nolock(child, NULL); 4609 } 4610 4611 clk_core_evict_parent_cache(clk->core); 4612 4613 hlist_del_init(&clk->core->child_node); 4614 4615 if (clk->core->prepare_count) 4616 pr_warn("%s: unregistering prepared clock: %s\n", 4617 __func__, clk->core->name); 4618 4619 if (clk->core->protect_count) 4620 pr_warn("%s: unregistering protected clock: %s\n", 4621 __func__, clk->core->name); 4622 clk_prepare_unlock(); 4623 4624 kref_put(&clk->core->ref, __clk_release); 4625 free_clk(clk); 4626 } 4627 EXPORT_SYMBOL_GPL(clk_unregister); 4628 4629 /** 4630 * clk_hw_unregister - unregister a currently registered clk_hw 4631 * @hw: hardware-specific clock data to unregister 4632 */ 4633 void clk_hw_unregister(struct clk_hw *hw) 4634 { 4635 clk_unregister(hw->clk); 4636 } 4637 EXPORT_SYMBOL_GPL(clk_hw_unregister); 4638 4639 static void devm_clk_unregister_cb(struct device *dev, void *res) 4640 { 4641 clk_unregister(*(struct clk **)res); 4642 } 4643 4644 static void devm_clk_hw_unregister_cb(struct device *dev, void *res) 4645 { 4646 clk_hw_unregister(*(struct clk_hw **)res); 4647 } 4648 4649 /** 4650 * devm_clk_register - resource managed clk_register() 4651 * @dev: device that is registering this clock 4652 * @hw: link to hardware-specific clock data 4653 * 4654 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. 4655 * 4656 * Clocks returned from this function are automatically clk_unregister()ed on 4657 * driver detach. See clk_register() for more information. 4658 */ 4659 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 4660 { 4661 struct clk *clk; 4662 struct clk **clkp; 4663 4664 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL); 4665 if (!clkp) 4666 return ERR_PTR(-ENOMEM); 4667 4668 clk = clk_register(dev, hw); 4669 if (!IS_ERR(clk)) { 4670 *clkp = clk; 4671 devres_add(dev, clkp); 4672 } else { 4673 devres_free(clkp); 4674 } 4675 4676 return clk; 4677 } 4678 EXPORT_SYMBOL_GPL(devm_clk_register); 4679 4680 /** 4681 * devm_clk_hw_register - resource managed clk_hw_register() 4682 * @dev: device that is registering this clock 4683 * @hw: link to hardware-specific clock data 4684 * 4685 * Managed clk_hw_register(). Clocks registered by this function are 4686 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 4687 * for more information. 4688 */ 4689 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 4690 { 4691 struct clk_hw **hwp; 4692 int ret; 4693 4694 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL); 4695 if (!hwp) 4696 return -ENOMEM; 4697 4698 ret = clk_hw_register(dev, hw); 4699 if (!ret) { 4700 *hwp = hw; 4701 devres_add(dev, hwp); 4702 } else { 4703 devres_free(hwp); 4704 } 4705 4706 return ret; 4707 } 4708 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 4709 4710 static void devm_clk_release(struct device *dev, void *res) 4711 { 4712 clk_put(*(struct clk **)res); 4713 } 4714 4715 /** 4716 * devm_clk_hw_get_clk - resource managed clk_hw_get_clk() 4717 * @dev: device that is registering this clock 4718 * @hw: clk_hw associated with the clk being consumed 4719 * @con_id: connection ID string on device 4720 * 4721 * Managed clk_hw_get_clk(). Clocks got with this function are 4722 * automatically clk_put() on driver detach. See clk_put() 4723 * for more information. 
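 *
 * Illustrative sketch only, assuming a hypothetical provider private struct
 * "priv" that holds an already registered clk_hw:
 *
 *   clk = devm_clk_hw_get_clk(dev, &priv->div_hw, "ref");
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);
 *
 * No explicit clk_put() is needed; devres drops the reference on detach.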
4724 */ 4725 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, 4726 const char *con_id) 4727 { 4728 struct clk *clk; 4729 struct clk **clkp; 4730 4731 /* This should not happen because it would mean we have drivers 4732 * passing around clk_hw pointers instead of having the caller use 4733 * proper clk_get() style APIs 4734 */ 4735 WARN_ON_ONCE(dev != hw->core->dev); 4736 4737 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 4738 if (!clkp) 4739 return ERR_PTR(-ENOMEM); 4740 4741 clk = clk_hw_get_clk(hw, con_id); 4742 if (!IS_ERR(clk)) { 4743 *clkp = clk; 4744 devres_add(dev, clkp); 4745 } else { 4746 devres_free(clkp); 4747 } 4748 4749 return clk; 4750 } 4751 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk); 4752 4753 /* 4754 * clkdev helpers 4755 */ 4756 4757 void __clk_put(struct clk *clk) 4758 { 4759 struct module *owner; 4760 4761 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4762 return; 4763 4764 clk_prepare_lock(); 4765 4766 /* 4767 * Before calling clk_put(), all calls to clk_rate_exclusive_get() from a 4768 * given consumer must be balanced with calls to clk_rate_exclusive_put() 4769 * by that same consumer. 4770 */ 4771 if (WARN_ON(clk->exclusive_count)) { 4772 /* We voiced our concern, let's sanitize the situation */ 4773 clk->core->protect_count -= (clk->exclusive_count - 1); 4774 clk_core_rate_unprotect(clk->core); 4775 clk->exclusive_count = 0; 4776 } 4777 4778 clk_core_unlink_consumer(clk); 4779 4780 /* If we had any boundaries on that clock, let's drop them. */ 4781 if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) 4782 clk_set_rate_range_nolock(clk, 0, ULONG_MAX); 4783 4784 clk_prepare_unlock(); 4785 4786 owner = clk->core->owner; 4787 kref_put(&clk->core->ref, __clk_release); 4788 module_put(owner); 4789 free_clk(clk); 4790 } 4791 4792 /*** clk rate change notifiers ***/ 4793 4794 /** 4795 * clk_notifier_register - add a clk rate change notifier 4796 * @clk: struct clk * to watch 4797 * @nb: struct notifier_block * with callback info 4798 * 4799 * Request notification when clk's rate changes. This uses an SRCU 4800 * notifier because we want it to block and notifier unregistrations are 4801 * uncommon. The callbacks associated with the notifier must not 4802 * re-enter the clk framework by calling any top-level clk APIs; 4803 * doing so would acquire the prepare_lock mutex recursively and deadlock. 4804 * 4805 * In all notification cases (pre, post and abort rate change) the original 4806 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 4807 * and the new frequency is passed via struct clk_notifier_data.new_rate. 4808 * 4809 * clk_notifier_register() must be called from non-atomic context. 4810 * Returns -EINVAL if called with null arguments, -ENOMEM upon 4811 * allocation failure; otherwise, passes along the return value of 4812 * srcu_notifier_chain_register().
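 *
 * A hedged consumer-side sketch (all "foo" names are hypothetical):
 *
 *   static int foo_clk_notify(struct notifier_block *nb,
 *                             unsigned long event, void *data)
 *   {
 *           struct clk_notifier_data *ndata = data;
 *
 *           if (event == PRE_RATE_CHANGE)
 *                   pr_debug("rate %lu -> %lu\n",
 *                            ndata->old_rate, ndata->new_rate);
 *
 *           return NOTIFY_OK;
 *   }
 *
 *   foo_nb.notifier_call = foo_clk_notify;
 *   ret = clk_notifier_register(clk, &foo_nb);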
4813 */ 4814 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 4815 { 4816 struct clk_notifier *cn; 4817 int ret = -ENOMEM; 4818 4819 if (!clk || !nb) 4820 return -EINVAL; 4821 4822 clk_prepare_lock(); 4823 4824 /* search the list of notifiers for this clk */ 4825 list_for_each_entry(cn, &clk_notifier_list, node) 4826 if (cn->clk == clk) 4827 goto found; 4828 4829 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 4830 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 4831 if (!cn) 4832 goto out; 4833 4834 cn->clk = clk; 4835 srcu_init_notifier_head(&cn->notifier_head); 4836 4837 list_add(&cn->node, &clk_notifier_list); 4838 4839 found: 4840 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 4841 4842 clk->core->notifier_count++; 4843 4844 out: 4845 clk_prepare_unlock(); 4846 4847 return ret; 4848 } 4849 EXPORT_SYMBOL_GPL(clk_notifier_register); 4850 4851 /** 4852 * clk_notifier_unregister - remove a clk rate change notifier 4853 * @clk: struct clk * 4854 * @nb: struct notifier_block * with callback info 4855 * 4856 * Request no further notification for changes to 'clk' and frees memory 4857 * allocated in clk_notifier_register. 4858 * 4859 * Returns -EINVAL if called with null arguments; otherwise, passes 4860 * along the return value of srcu_notifier_chain_unregister(). 4861 */ 4862 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 4863 { 4864 struct clk_notifier *cn; 4865 int ret = -ENOENT; 4866 4867 if (!clk || !nb) 4868 return -EINVAL; 4869 4870 clk_prepare_lock(); 4871 4872 list_for_each_entry(cn, &clk_notifier_list, node) { 4873 if (cn->clk == clk) { 4874 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 4875 4876 clk->core->notifier_count--; 4877 4878 /* XXX the notifier code should handle this better */ 4879 if (!cn->notifier_head.head) { 4880 srcu_cleanup_notifier_head(&cn->notifier_head); 4881 list_del(&cn->node); 4882 kfree(cn); 4883 } 4884 break; 4885 } 4886 } 4887 4888 clk_prepare_unlock(); 4889 4890 return ret; 4891 } 4892 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 4893 4894 struct clk_notifier_devres { 4895 struct clk *clk; 4896 struct notifier_block *nb; 4897 }; 4898 4899 static void devm_clk_notifier_release(struct device *dev, void *res) 4900 { 4901 struct clk_notifier_devres *devres = res; 4902 4903 clk_notifier_unregister(devres->clk, devres->nb); 4904 } 4905 4906 int devm_clk_notifier_register(struct device *dev, struct clk *clk, 4907 struct notifier_block *nb) 4908 { 4909 struct clk_notifier_devres *devres; 4910 int ret; 4911 4912 devres = devres_alloc(devm_clk_notifier_release, 4913 sizeof(*devres), GFP_KERNEL); 4914 4915 if (!devres) 4916 return -ENOMEM; 4917 4918 ret = clk_notifier_register(clk, nb); 4919 if (!ret) { 4920 devres->clk = clk; 4921 devres->nb = nb; 4922 devres_add(dev, devres); 4923 } else { 4924 devres_free(devres); 4925 } 4926 4927 return ret; 4928 } 4929 EXPORT_SYMBOL_GPL(devm_clk_notifier_register); 4930 4931 #ifdef CONFIG_OF 4932 static void clk_core_reparent_orphans(void) 4933 { 4934 clk_prepare_lock(); 4935 clk_core_reparent_orphans_nolock(); 4936 clk_prepare_unlock(); 4937 } 4938 4939 /** 4940 * struct of_clk_provider - Clock provider registration structure 4941 * @link: Entry in global list of clock providers 4942 * @node: Pointer to device tree node of clock provider 4943 * @get: Get clock callback. Returns NULL or a struct clk for the 4944 * given clock specifier 4945 * @get_hw: Get clk_hw callback. 
Returns NULL, ERR_PTR or a 4946 * struct clk_hw for the given clock specifier 4947 * @data: context pointer to be passed into @get callback 4948 */ 4949 struct of_clk_provider { 4950 struct list_head link; 4951 4952 struct device_node *node; 4953 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 4954 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 4955 void *data; 4956 }; 4957 4958 extern struct of_device_id __clk_of_table; 4959 static const struct of_device_id __clk_of_table_sentinel 4960 __used __section("__clk_of_table_end"); 4961 4962 static LIST_HEAD(of_clk_providers); 4963 static DEFINE_MUTEX(of_clk_mutex); 4964 4965 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 4966 void *data) 4967 { 4968 return data; 4969 } 4970 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 4971 4972 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 4973 { 4974 return data; 4975 } 4976 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 4977 4978 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 4979 { 4980 struct clk_onecell_data *clk_data = data; 4981 unsigned int idx = clkspec->args[0]; 4982 4983 if (idx >= clk_data->clk_num) { 4984 pr_err("%s: invalid clock index %u\n", __func__, idx); 4985 return ERR_PTR(-EINVAL); 4986 } 4987 4988 return clk_data->clks[idx]; 4989 } 4990 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 4991 4992 struct clk_hw * 4993 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 4994 { 4995 struct clk_hw_onecell_data *hw_data = data; 4996 unsigned int idx = clkspec->args[0]; 4997 4998 if (idx >= hw_data->num) { 4999 pr_err("%s: invalid index %u\n", __func__, idx); 5000 return ERR_PTR(-EINVAL); 5001 } 5002 5003 return hw_data->hws[idx]; 5004 } 5005 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 5006 5007 /** 5008 * of_clk_add_provider() - Register a clock provider for a node 5009 * @np: Device node pointer associated with clock provider 5010 * @clk_src_get: callback for decoding clock 5011 * @data: context pointer for @clk_src_get callback. 5012 * 5013 * This function is *deprecated*. Use of_clk_add_hw_provider() instead. 5014 */ 5015 int of_clk_add_provider(struct device_node *np, 5016 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 5017 void *data), 5018 void *data) 5019 { 5020 struct of_clk_provider *cp; 5021 int ret; 5022 5023 if (!np) 5024 return 0; 5025 5026 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 5027 if (!cp) 5028 return -ENOMEM; 5029 5030 cp->node = of_node_get(np); 5031 cp->data = data; 5032 cp->get = clk_src_get; 5033 5034 mutex_lock(&of_clk_mutex); 5035 list_add(&cp->link, &of_clk_providers); 5036 mutex_unlock(&of_clk_mutex); 5037 pr_debug("Added clock from %pOF\n", np); 5038 5039 clk_core_reparent_orphans(); 5040 5041 ret = of_clk_set_defaults(np, true); 5042 if (ret < 0) 5043 of_clk_del_provider(np); 5044 5045 fwnode_dev_initialized(&np->fwnode, true); 5046 5047 return ret; 5048 } 5049 EXPORT_SYMBOL_GPL(of_clk_add_provider); 5050 5051 /** 5052 * of_clk_add_hw_provider() - Register a clock provider for a node 5053 * @np: Device node pointer associated with clock provider 5054 * @get: callback for decoding clk_hw 5055 * @data: context pointer for @get callback. 
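 *
 * A minimal sketch, assuming the provider has already filled a struct
 * clk_hw_onecell_data named "hw_data" (hypothetical):
 *
 *   ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 *   if (ret)
 *           return ret;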
5056 */ 5057 int of_clk_add_hw_provider(struct device_node *np, 5058 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 5059 void *data), 5060 void *data) 5061 { 5062 struct of_clk_provider *cp; 5063 int ret; 5064 5065 if (!np) 5066 return 0; 5067 5068 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 5069 if (!cp) 5070 return -ENOMEM; 5071 5072 cp->node = of_node_get(np); 5073 cp->data = data; 5074 cp->get_hw = get; 5075 5076 mutex_lock(&of_clk_mutex); 5077 list_add(&cp->link, &of_clk_providers); 5078 mutex_unlock(&of_clk_mutex); 5079 pr_debug("Added clk_hw provider from %pOF\n", np); 5080 5081 clk_core_reparent_orphans(); 5082 5083 ret = of_clk_set_defaults(np, true); 5084 if (ret < 0) 5085 of_clk_del_provider(np); 5086 5087 fwnode_dev_initialized(&np->fwnode, true); 5088 5089 return ret; 5090 } 5091 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 5092 5093 static void devm_of_clk_release_provider(struct device *dev, void *res) 5094 { 5095 of_clk_del_provider(*(struct device_node **)res); 5096 } 5097 5098 /* 5099 * We allow a child device to use its parent device as the clock provider node 5100 * for cases like MFD sub-devices where the child device driver wants to use 5101 * devm_*() APIs but not list the device in DT as a sub-node. 5102 */ 5103 static struct device_node *get_clk_provider_node(struct device *dev) 5104 { 5105 struct device_node *np, *parent_np; 5106 5107 np = dev->of_node; 5108 parent_np = dev->parent ? dev->parent->of_node : NULL; 5109 5110 if (!of_property_present(np, "#clock-cells")) 5111 if (of_property_present(parent_np, "#clock-cells")) 5112 np = parent_np; 5113 5114 return np; 5115 } 5116 5117 /** 5118 * devm_of_clk_add_hw_provider() - Managed clk provider node registration 5119 * @dev: Device acting as the clock provider (used for DT node and lifetime) 5120 * @get: callback for decoding clk_hw 5121 * @data: context pointer for @get callback 5122 * 5123 * Registers a clock provider for the given device's node. If the device has no 5124 * DT node, or if its node lacks clock provider information (#clock-cells), 5125 * then the parent device's node is checked for this information. If the parent 5126 * node has #clock-cells, it is used for the registration. The provider is 5127 * automatically released at device exit. 5128 * 5129 * Return: 0 on success or an errno on failure.
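 *
 * Illustrative use from a provider's probe() (the "hw_data" pointer is
 * hypothetical, not part of this file):
 *
 *   return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
 *                                      hw_data);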
5130 */ 5131 int devm_of_clk_add_hw_provider(struct device *dev, 5132 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 5133 void *data), 5134 void *data) 5135 { 5136 struct device_node **ptr, *np; 5137 int ret; 5138 5139 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 5140 GFP_KERNEL); 5141 if (!ptr) 5142 return -ENOMEM; 5143 5144 np = get_clk_provider_node(dev); 5145 ret = of_clk_add_hw_provider(np, get, data); 5146 if (!ret) { 5147 *ptr = np; 5148 devres_add(dev, ptr); 5149 } else { 5150 devres_free(ptr); 5151 } 5152 5153 return ret; 5154 } 5155 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 5156 5157 /** 5158 * of_clk_del_provider() - Remove a previously registered clock provider 5159 * @np: Device node pointer associated with clock provider 5160 */ 5161 void of_clk_del_provider(struct device_node *np) 5162 { 5163 struct of_clk_provider *cp; 5164 5165 if (!np) 5166 return; 5167 5168 mutex_lock(&of_clk_mutex); 5169 list_for_each_entry(cp, &of_clk_providers, link) { 5170 if (cp->node == np) { 5171 list_del(&cp->link); 5172 fwnode_dev_initialized(&np->fwnode, false); 5173 of_node_put(cp->node); 5174 kfree(cp); 5175 break; 5176 } 5177 } 5178 mutex_unlock(&of_clk_mutex); 5179 } 5180 EXPORT_SYMBOL_GPL(of_clk_del_provider); 5181 5182 /** 5183 * of_parse_clkspec() - Parse a DT clock specifier for a given device node 5184 * @np: device node to parse clock specifier from 5185 * @index: index of phandle to parse clock out of. If index < 0, @name is used 5186 * @name: clock name to find and parse. If name is NULL, the index is used 5187 * @out_args: Result of parsing the clock specifier 5188 * 5189 * Parses a device node's "clocks" and "clock-names" properties to find the 5190 * phandle and cells for the index or name that is desired. The resulting clock 5191 * specifier is placed into @out_args, or an errno is returned when there's a 5192 * parsing error. The @index argument is ignored if @name is non-NULL. 5193 * 5194 * Example: 5195 * 5196 * phandle1: clock-controller@1 { 5197 * #clock-cells = <2>; 5198 * } 5199 * 5200 * phandle2: clock-controller@2 { 5201 * #clock-cells = <1>; 5202 * } 5203 * 5204 * clock-consumer@3 { 5205 * clocks = <&phandle1 1 2 &phandle2 3>; 5206 * clock-names = "name1", "name2"; 5207 * } 5208 * 5209 * To get a device_node for `clock-controller@2' node you may call this 5210 * function a few different ways: 5211 * 5212 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); 5213 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); 5214 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); 5215 * 5216 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT 5217 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in 5218 * the "clock-names" property of @np. 5219 */ 5220 static int of_parse_clkspec(const struct device_node *np, int index, 5221 const char *name, struct of_phandle_args *out_args) 5222 { 5223 int ret = -ENOENT; 5224 5225 /* Walk up the tree of devices looking for a clock property that matches */ 5226 while (np) { 5227 /* 5228 * For named clocks, first look up the name in the 5229 * "clock-names" property. If it cannot be found, then index 5230 * will be an error code and of_parse_phandle_with_args() will 5231 * return -EINVAL. 
5232 */ 5233 if (name) 5234 index = of_property_match_string(np, "clock-names", name); 5235 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 5236 index, out_args); 5237 if (!ret) 5238 break; 5239 if (name && index >= 0) 5240 break; 5241 5242 /* 5243 * No matching clock found on this node. If the parent node 5244 * has a "clock-ranges" property, then we can try one of its 5245 * clocks. 5246 */ 5247 np = np->parent; 5248 if (np && !of_property_present(np, "clock-ranges")) 5249 break; 5250 index = 0; 5251 } 5252 5253 return ret; 5254 } 5255 5256 static struct clk_hw * 5257 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 5258 struct of_phandle_args *clkspec) 5259 { 5260 struct clk *clk; 5261 5262 if (provider->get_hw) 5263 return provider->get_hw(clkspec, provider->data); 5264 5265 clk = provider->get(clkspec, provider->data); 5266 if (IS_ERR(clk)) 5267 return ERR_CAST(clk); 5268 return __clk_get_hw(clk); 5269 } 5270 5271 static struct clk_hw * 5272 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 5273 { 5274 struct of_clk_provider *provider; 5275 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); 5276 5277 if (!clkspec) 5278 return ERR_PTR(-EINVAL); 5279 5280 /* Check if node in clkspec is in disabled/fail state */ 5281 if (!of_device_is_available(clkspec->np)) 5282 return ERR_PTR(-ENOENT); 5283 5284 mutex_lock(&of_clk_mutex); 5285 list_for_each_entry(provider, &of_clk_providers, link) { 5286 if (provider->node == clkspec->np) { 5287 hw = __of_clk_get_hw_from_provider(provider, clkspec); 5288 if (!IS_ERR(hw)) 5289 break; 5290 } 5291 } 5292 mutex_unlock(&of_clk_mutex); 5293 5294 return hw; 5295 } 5296 5297 /** 5298 * of_clk_get_from_provider() - Lookup a clock from a clock provider 5299 * @clkspec: pointer to a clock specifier data structure 5300 * 5301 * This function looks up a struct clk from the registered list of clock 5302 * providers, an input is a clock specifier data structure as returned 5303 * from the of_parse_phandle_with_args() function call. 5304 */ 5305 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 5306 { 5307 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); 5308 5309 return clk_hw_create_clk(NULL, hw, NULL, __func__); 5310 } 5311 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 5312 5313 struct clk_hw *of_clk_get_hw(struct device_node *np, int index, 5314 const char *con_id) 5315 { 5316 int ret; 5317 struct clk_hw *hw; 5318 struct of_phandle_args clkspec; 5319 5320 ret = of_parse_clkspec(np, index, con_id, &clkspec); 5321 if (ret) 5322 return ERR_PTR(ret); 5323 5324 hw = of_clk_get_hw_from_clkspec(&clkspec); 5325 of_node_put(clkspec.np); 5326 5327 return hw; 5328 } 5329 5330 static struct clk *__of_clk_get(struct device_node *np, 5331 int index, const char *dev_id, 5332 const char *con_id) 5333 { 5334 struct clk_hw *hw = of_clk_get_hw(np, index, con_id); 5335 5336 return clk_hw_create_clk(NULL, hw, dev_id, con_id); 5337 } 5338 5339 struct clk *of_clk_get(struct device_node *np, int index) 5340 { 5341 return __of_clk_get(np, index, np->full_name, NULL); 5342 } 5343 EXPORT_SYMBOL(of_clk_get); 5344 5345 /** 5346 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node 5347 * @np: pointer to clock consumer node 5348 * @name: name of consumer's clock input, or NULL for the first clock reference 5349 * 5350 * This function parses the clocks and clock-names properties, 5351 * and uses them to look up the struct clk from the registered list of clock 5352 * providers. 
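 *
 * For example, a consumer node such as (illustrative only):
 *
 *   uart0: serial@1000 {
 *           clocks = <&osc>, <&pll 1>;
 *           clock-names = "xtal", "core";
 *   };
 *
 * could be resolved with:
 *
 *   struct clk *clk = of_clk_get_by_name(np, "core");
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);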
5353 */ 5354 struct clk *of_clk_get_by_name(struct device_node *np, const char *name) 5355 { 5356 if (!np) 5357 return ERR_PTR(-ENOENT); 5358 5359 return __of_clk_get(np, 0, np->full_name, name); 5360 } 5361 EXPORT_SYMBOL(of_clk_get_by_name); 5362 5363 /** 5364 * of_clk_get_parent_count() - Count the number of clocks a device node has 5365 * @np: device node to count 5366 * 5367 * Returns: The number of clocks that are possible parents of this node 5368 */ 5369 unsigned int of_clk_get_parent_count(const struct device_node *np) 5370 { 5371 int count; 5372 5373 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 5374 if (count < 0) 5375 return 0; 5376 5377 return count; 5378 } 5379 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 5380 5381 const char *of_clk_get_parent_name(const struct device_node *np, int index) 5382 { 5383 struct of_phandle_args clkspec; 5384 const char *clk_name; 5385 bool found = false; 5386 u32 pv; 5387 int rc; 5388 int count; 5389 struct clk *clk; 5390 5391 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 5392 &clkspec); 5393 if (rc) 5394 return NULL; 5395 5396 index = clkspec.args_count ? clkspec.args[0] : 0; 5397 count = 0; 5398 5399 /* if there is an indices property, use it to transfer the index 5400 * specified into an array offset for the clock-output-names property. 5401 */ 5402 of_property_for_each_u32(clkspec.np, "clock-indices", pv) { 5403 if (index == pv) { 5404 index = count; 5405 found = true; 5406 break; 5407 } 5408 count++; 5409 } 5410 /* We went off the end of 'clock-indices' without finding it */ 5411 if (of_property_present(clkspec.np, "clock-indices") && !found) { 5412 of_node_put(clkspec.np); 5413 return NULL; 5414 } 5415 5416 if (of_property_read_string_index(clkspec.np, "clock-output-names", 5417 index, 5418 &clk_name) < 0) { 5419 /* 5420 * Best effort to get the name if the clock has been 5421 * registered with the framework. If the clock isn't 5422 * registered, we return the node name as the name of 5423 * the clock as long as #clock-cells = 0. 5424 */ 5425 clk = of_clk_get_from_provider(&clkspec); 5426 if (IS_ERR(clk)) { 5427 if (clkspec.args_count == 0) 5428 clk_name = clkspec.np->name; 5429 else 5430 clk_name = NULL; 5431 } else { 5432 clk_name = __clk_get_name(clk); 5433 clk_put(clk); 5434 } 5435 } 5436 5437 5438 of_node_put(clkspec.np); 5439 return clk_name; 5440 } 5441 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 5442 5443 /** 5444 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 5445 * number of parents 5446 * @np: Device node pointer associated with clock provider 5447 * @parents: pointer to char array that hold the parents' names 5448 * @size: size of the @parents array 5449 * 5450 * Return: number of parents for the clock node. 5451 */ 5452 int of_clk_parent_fill(struct device_node *np, const char **parents, 5453 unsigned int size) 5454 { 5455 unsigned int i = 0; 5456 5457 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 5458 i++; 5459 5460 return i; 5461 } 5462 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 5463 5464 struct clock_provider { 5465 void (*clk_init_cb)(struct device_node *); 5466 struct device_node *np; 5467 struct list_head node; 5468 }; 5469 5470 /* 5471 * This function looks for a parent clock. If there is one, then it 5472 * checks that the provider for this parent clock was initialized, in 5473 * this case the parent clock will be ready. 
5474 */ 5475 static int parent_ready(struct device_node *np) 5476 { 5477 int i = 0; 5478 5479 while (true) { 5480 struct clk *clk = of_clk_get(np, i); 5481 5482 /* this parent is ready, we can check the next one */ 5483 if (!IS_ERR(clk)) { 5484 clk_put(clk); 5485 i++; 5486 continue; 5487 } 5488 5489 /* at least one parent is not ready, we exit now */ 5490 if (PTR_ERR(clk) == -EPROBE_DEFER) 5491 return 0; 5492 5493 /* 5494 * Here we assume that the device tree is 5495 * written correctly, so any other error means that there is 5496 * no more parent. As we didn't exit yet, all the 5497 * previous parents are ready. If there is no clock 5498 * parent at all, there is nothing to wait for, so we can 5499 * consider their absence as being ready. 5500 */ 5501 return 1; 5502 } 5503 } 5504 5505 /** 5506 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 5507 * @np: Device node pointer associated with clock provider 5508 * @index: clock index 5509 * @flags: pointer to top-level framework flags 5510 * 5511 * Detects if the clock-critical property exists and, if so, sets the 5512 * corresponding CLK_IS_CRITICAL flag. 5513 * 5514 * Do not use this function. It exists only for legacy Device Tree 5515 * bindings, such as the outdated one-clock-per-node style. 5516 * Those bindings typically put all clock data into .dts and the Linux 5517 * driver has no clock data, thus making it impossible to set this flag 5518 * correctly from the driver. Only those drivers may call 5519 * of_clk_detect_critical() from their setup functions. 5520 * 5521 * Return: error code or zero on success 5522 */ 5523 int of_clk_detect_critical(struct device_node *np, int index, 5524 unsigned long *flags) 5525 { 5526 uint32_t idx; 5527 5528 if (!np || !flags) 5529 return -EINVAL; 5530 5531 of_property_for_each_u32(np, "clock-critical", idx) 5532 if (index == idx) 5533 *flags |= CLK_IS_CRITICAL; 5534 5535 return 0; 5536 } 5537 5538 /** 5539 * of_clk_init() - Scan and init clock providers from the DT 5540 * @matches: array of compatible values and init functions for providers. 5541 * 5542 * This function scans the device tree for matching clock providers 5543 * and calls their initialization functions. It does so while trying 5544 * to follow the dependencies between providers.
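 *
 * Providers typically hook into this via CLK_OF_DECLARE(); a hedged sketch
 * (the "foo" names and compatible string are hypothetical):
 *
 *   static void __init foo_clk_setup(struct device_node *np)
 *   {
 *           // register the clks for this node, then call
 *           // of_clk_add_hw_provider() so consumers can find them
 *   }
 *   CLK_OF_DECLARE(foo_clk, "vendor,foo-clocks", foo_clk_setup);
 *
 * Early platform code then calls of_clk_init(NULL) to walk __clk_of_table.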
5545 */ 5546 void __init of_clk_init(const struct of_device_id *matches) 5547 { 5548 const struct of_device_id *match; 5549 struct device_node *np; 5550 struct clock_provider *clk_provider, *next; 5551 bool is_init_done; 5552 bool force = false; 5553 LIST_HEAD(clk_provider_list); 5554 5555 if (!matches) 5556 matches = &__clk_of_table; 5557 5558 /* First prepare the list of the clocks providers */ 5559 for_each_matching_node_and_match(np, matches, &match) { 5560 struct clock_provider *parent; 5561 5562 if (!of_device_is_available(np)) 5563 continue; 5564 5565 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 5566 if (!parent) { 5567 list_for_each_entry_safe(clk_provider, next, 5568 &clk_provider_list, node) { 5569 list_del(&clk_provider->node); 5570 of_node_put(clk_provider->np); 5571 kfree(clk_provider); 5572 } 5573 of_node_put(np); 5574 return; 5575 } 5576 5577 parent->clk_init_cb = match->data; 5578 parent->np = of_node_get(np); 5579 list_add_tail(&parent->node, &clk_provider_list); 5580 } 5581 5582 while (!list_empty(&clk_provider_list)) { 5583 is_init_done = false; 5584 list_for_each_entry_safe(clk_provider, next, 5585 &clk_provider_list, node) { 5586 if (force || parent_ready(clk_provider->np)) { 5587 5588 /* Don't populate platform devices */ 5589 of_node_set_flag(clk_provider->np, 5590 OF_POPULATED); 5591 5592 clk_provider->clk_init_cb(clk_provider->np); 5593 of_clk_set_defaults(clk_provider->np, true); 5594 5595 list_del(&clk_provider->node); 5596 of_node_put(clk_provider->np); 5597 kfree(clk_provider); 5598 is_init_done = true; 5599 } 5600 } 5601 5602 /* 5603 * We didn't manage to initialize any of the 5604 * remaining providers during the last loop, so now we 5605 * initialize all the remaining ones unconditionally 5606 * in case the clock parent was not mandatory 5607 */ 5608 if (!is_init_done) 5609 force = true; 5610 } 5611 } 5612 #endif 5613