1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 4 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 5 * 6 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst 7 */ 8 9 #include <linux/clk.h> 10 #include <linux/clk-provider.h> 11 #include <linux/clk/clk-conf.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/spinlock.h> 15 #include <linux/err.h> 16 #include <linux/list.h> 17 #include <linux/slab.h> 18 #include <linux/of.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/sched.h> 23 #include <linux/clkdev.h> 24 25 #include "clk.h" 26 27 static DEFINE_SPINLOCK(enable_lock); 28 static DEFINE_MUTEX(prepare_lock); 29 30 static struct task_struct *prepare_owner; 31 static struct task_struct *enable_owner; 32 33 static int prepare_refcnt; 34 static int enable_refcnt; 35 36 static HLIST_HEAD(clk_root_list); 37 static HLIST_HEAD(clk_orphan_list); 38 static LIST_HEAD(clk_notifier_list); 39 40 static const struct hlist_head *all_lists[] = { 41 &clk_root_list, 42 &clk_orphan_list, 43 NULL, 44 }; 45 46 /*** private data structures ***/ 47 48 struct clk_parent_map { 49 const struct clk_hw *hw; 50 struct clk_core *core; 51 const char *fw_name; 52 const char *name; 53 int index; 54 }; 55 56 struct clk_core { 57 const char *name; 58 const struct clk_ops *ops; 59 struct clk_hw *hw; 60 struct module *owner; 61 struct device *dev; 62 struct device_node *of_node; 63 struct clk_core *parent; 64 struct clk_parent_map *parents; 65 u8 num_parents; 66 u8 new_parent_index; 67 unsigned long rate; 68 unsigned long req_rate; 69 unsigned long new_rate; 70 struct clk_core *new_parent; 71 struct clk_core *new_child; 72 unsigned long flags; 73 bool orphan; 74 bool rpm_enabled; 75 unsigned int enable_count; 76 unsigned int prepare_count; 77 unsigned int protect_count; 78 unsigned long min_rate; 79 unsigned long max_rate; 80 unsigned long accuracy; 81 int phase; 82 struct clk_duty duty; 83 struct hlist_head children; 84 struct hlist_node child_node; 85 struct hlist_head clks; 86 unsigned int notifier_count; 87 #ifdef CONFIG_DEBUG_FS 88 struct dentry *dentry; 89 struct hlist_node debug_node; 90 #endif 91 struct kref ref; 92 }; 93 94 #define CREATE_TRACE_POINTS 95 #include <trace/events/clk.h> 96 97 struct clk { 98 struct clk_core *core; 99 struct device *dev; 100 const char *dev_id; 101 const char *con_id; 102 unsigned long min_rate; 103 unsigned long max_rate; 104 unsigned int exclusive_count; 105 struct hlist_node clks_node; 106 }; 107 108 /*** runtime pm ***/ 109 static int clk_pm_runtime_get(struct clk_core *core) 110 { 111 if (!core->rpm_enabled) 112 return 0; 113 114 return pm_runtime_resume_and_get(core->dev); 115 } 116 117 static void clk_pm_runtime_put(struct clk_core *core) 118 { 119 if (!core->rpm_enabled) 120 return; 121 122 pm_runtime_put_sync(core->dev); 123 } 124 125 /*** locking ***/ 126 static void clk_prepare_lock(void) 127 { 128 if (!mutex_trylock(&prepare_lock)) { 129 if (prepare_owner == current) { 130 prepare_refcnt++; 131 return; 132 } 133 mutex_lock(&prepare_lock); 134 } 135 WARN_ON_ONCE(prepare_owner != NULL); 136 WARN_ON_ONCE(prepare_refcnt != 0); 137 prepare_owner = current; 138 prepare_refcnt = 1; 139 } 140 141 static void clk_prepare_unlock(void) 142 { 143 WARN_ON_ONCE(prepare_owner != current); 144 WARN_ON_ONCE(prepare_refcnt == 0); 145 146 if (--prepare_refcnt) 147 return; 148 prepare_owner = NULL; 
149 mutex_unlock(&prepare_lock); 150 } 151 152 static unsigned long clk_enable_lock(void) 153 __acquires(enable_lock) 154 { 155 unsigned long flags; 156 157 /* 158 * On UP systems, spin_trylock_irqsave() always returns true, even if 159 * we already hold the lock. So, in that case, we rely only on 160 * reference counting. 161 */ 162 if (!IS_ENABLED(CONFIG_SMP) || 163 !spin_trylock_irqsave(&enable_lock, flags)) { 164 if (enable_owner == current) { 165 enable_refcnt++; 166 __acquire(enable_lock); 167 if (!IS_ENABLED(CONFIG_SMP)) 168 local_save_flags(flags); 169 return flags; 170 } 171 spin_lock_irqsave(&enable_lock, flags); 172 } 173 WARN_ON_ONCE(enable_owner != NULL); 174 WARN_ON_ONCE(enable_refcnt != 0); 175 enable_owner = current; 176 enable_refcnt = 1; 177 return flags; 178 } 179 180 static void clk_enable_unlock(unsigned long flags) 181 __releases(enable_lock) 182 { 183 WARN_ON_ONCE(enable_owner != current); 184 WARN_ON_ONCE(enable_refcnt == 0); 185 186 if (--enable_refcnt) { 187 __release(enable_lock); 188 return; 189 } 190 enable_owner = NULL; 191 spin_unlock_irqrestore(&enable_lock, flags); 192 } 193 194 static bool clk_core_rate_is_protected(struct clk_core *core) 195 { 196 return core->protect_count; 197 } 198 199 static bool clk_core_is_prepared(struct clk_core *core) 200 { 201 bool ret = false; 202 203 /* 204 * .is_prepared is optional for clocks that can prepare 205 * fall back to software usage counter if it is missing 206 */ 207 if (!core->ops->is_prepared) 208 return core->prepare_count; 209 210 if (!clk_pm_runtime_get(core)) { 211 ret = core->ops->is_prepared(core->hw); 212 clk_pm_runtime_put(core); 213 } 214 215 return ret; 216 } 217 218 static bool clk_core_is_enabled(struct clk_core *core) 219 { 220 bool ret = false; 221 222 /* 223 * .is_enabled is only mandatory for clocks that gate 224 * fall back to software usage counter if .is_enabled is missing 225 */ 226 if (!core->ops->is_enabled) 227 return core->enable_count; 228 229 /* 230 * Check if clock controller's device is runtime active before 231 * calling .is_enabled callback. If not, assume that clock is 232 * disabled, because we might be called from atomic context, from 233 * which pm_runtime_get() is not allowed. 234 * This function is called mainly from clk_disable_unused_subtree, 235 * which ensures proper runtime pm activation of controller before 236 * taking enable spinlock, but the below check is needed if one tries 237 * to call it from other places. 238 */ 239 if (core->rpm_enabled) { 240 pm_runtime_get_noresume(core->dev); 241 if (!pm_runtime_active(core->dev)) { 242 ret = false; 243 goto done; 244 } 245 } 246 247 ret = core->ops->is_enabled(core->hw); 248 done: 249 if (core->rpm_enabled) 250 pm_runtime_put(core->dev); 251 252 return ret; 253 } 254 255 /*** helper functions ***/ 256 257 const char *__clk_get_name(const struct clk *clk) 258 { 259 return !clk ? NULL : clk->core->name; 260 } 261 EXPORT_SYMBOL_GPL(__clk_get_name); 262 263 const char *clk_hw_get_name(const struct clk_hw *hw) 264 { 265 return hw->core->name; 266 } 267 EXPORT_SYMBOL_GPL(clk_hw_get_name); 268 269 struct clk_hw *__clk_get_hw(struct clk *clk) 270 { 271 return !clk ? NULL : clk->core->hw; 272 } 273 EXPORT_SYMBOL_GPL(__clk_get_hw); 274 275 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) 276 { 277 return hw->core->num_parents; 278 } 279 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); 280 281 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) 282 { 283 return hw->core->parent ? 
hw->core->parent->hw : NULL; 284 } 285 EXPORT_SYMBOL_GPL(clk_hw_get_parent); 286 287 static struct clk_core *__clk_lookup_subtree(const char *name, 288 struct clk_core *core) 289 { 290 struct clk_core *child; 291 struct clk_core *ret; 292 293 if (!strcmp(core->name, name)) 294 return core; 295 296 hlist_for_each_entry(child, &core->children, child_node) { 297 ret = __clk_lookup_subtree(name, child); 298 if (ret) 299 return ret; 300 } 301 302 return NULL; 303 } 304 305 static struct clk_core *clk_core_lookup(const char *name) 306 { 307 struct clk_core *root_clk; 308 struct clk_core *ret; 309 310 if (!name) 311 return NULL; 312 313 /* search the 'proper' clk tree first */ 314 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 315 ret = __clk_lookup_subtree(name, root_clk); 316 if (ret) 317 return ret; 318 } 319 320 /* if not found, then search the orphan tree */ 321 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 322 ret = __clk_lookup_subtree(name, root_clk); 323 if (ret) 324 return ret; 325 } 326 327 return NULL; 328 } 329 330 #ifdef CONFIG_OF 331 static int of_parse_clkspec(const struct device_node *np, int index, 332 const char *name, struct of_phandle_args *out_args); 333 static struct clk_hw * 334 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec); 335 #else 336 static inline int of_parse_clkspec(const struct device_node *np, int index, 337 const char *name, 338 struct of_phandle_args *out_args) 339 { 340 return -ENOENT; 341 } 342 static inline struct clk_hw * 343 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 344 { 345 return ERR_PTR(-ENOENT); 346 } 347 #endif 348 349 /** 350 * clk_core_get - Find the clk_core parent of a clk 351 * @core: clk to find parent of 352 * @p_index: parent index to search for 353 * 354 * This is the preferred method for clk providers to find the parent of a 355 * clk when that parent is external to the clk controller. The parent_names 356 * array is indexed and treated as a local name matching a string in the device 357 * node's 'clock-names' property or as the 'con_id' matching the device's 358 * dev_name() in a clk_lookup. This allows clk providers to use their own 359 * namespace instead of looking for a globally unique parent string. 360 * 361 * For example the following DT snippet would allow a clock registered by the 362 * clock-controller@c001 that has a clk_init_data::parent_data array 363 * with 'xtal' in the 'name' member to find the clock provided by the 364 * clock-controller@f00abcd without needing to get the globally unique name of 365 * the xtal clk. 366 * 367 * parent: clock-controller@f00abcd { 368 * reg = <0xf00abcd 0xabcd>; 369 * #clock-cells = <0>; 370 * }; 371 * 372 * clock-controller@c001 { 373 * reg = <0xc001 0xf00d>; 374 * clocks = <&parent>; 375 * clock-names = "xtal"; 376 * #clock-cells = <1>; 377 * }; 378 * 379 * Returns: -ENOENT when the provider can't be found or the clk doesn't 380 * exist in the provider or the name can't be found in the DT node or 381 * in a clkdev lookup. NULL when the provider knows about the clk but it 382 * isn't provided on this system. 383 * A valid clk_core pointer when the clk can be found in the provider. 384 */ 385 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 386 { 387 const char *name = core->parents[p_index].fw_name; 388 int index = core->parents[p_index].index; 389 struct clk_hw *hw = ERR_PTR(-ENOENT); 390 struct device *dev = core->dev; 391 const char *dev_id = dev ? 
dev_name(dev) : NULL; 392 struct device_node *np = core->of_node; 393 struct of_phandle_args clkspec; 394 395 if (np && (name || index >= 0) && 396 !of_parse_clkspec(np, index, name, &clkspec)) { 397 hw = of_clk_get_hw_from_clkspec(&clkspec); 398 of_node_put(clkspec.np); 399 } else if (name) { 400 /* 401 * If the DT search above couldn't find the provider fallback to 402 * looking up via clkdev based clk_lookups. 403 */ 404 hw = clk_find_hw(dev_id, name); 405 } 406 407 if (IS_ERR(hw)) 408 return ERR_CAST(hw); 409 410 return hw->core; 411 } 412 413 static void clk_core_fill_parent_index(struct clk_core *core, u8 index) 414 { 415 struct clk_parent_map *entry = &core->parents[index]; 416 struct clk_core *parent; 417 418 if (entry->hw) { 419 parent = entry->hw->core; 420 } else { 421 parent = clk_core_get(core, index); 422 if (PTR_ERR(parent) == -ENOENT && entry->name) 423 parent = clk_core_lookup(entry->name); 424 } 425 426 /* 427 * We have a direct reference but it isn't registered yet? 428 * Orphan it and let clk_reparent() update the orphan status 429 * when the parent is registered. 430 */ 431 if (!parent) 432 parent = ERR_PTR(-EPROBE_DEFER); 433 434 /* Only cache it if it's not an error */ 435 if (!IS_ERR(parent)) 436 entry->core = parent; 437 } 438 439 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, 440 u8 index) 441 { 442 if (!core || index >= core->num_parents || !core->parents) 443 return NULL; 444 445 if (!core->parents[index].core) 446 clk_core_fill_parent_index(core, index); 447 448 return core->parents[index].core; 449 } 450 451 struct clk_hw * 452 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) 453 { 454 struct clk_core *parent; 455 456 parent = clk_core_get_parent_by_index(hw->core, index); 457 458 return !parent ? NULL : parent->hw; 459 } 460 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); 461 462 unsigned int __clk_get_enable_count(struct clk *clk) 463 { 464 return !clk ? 0 : clk->core->enable_count; 465 } 466 467 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) 468 { 469 if (!core) 470 return 0; 471 472 if (!core->num_parents || core->parent) 473 return core->rate; 474 475 /* 476 * Clk must have a parent because num_parents > 0 but the parent isn't 477 * known yet. Best to return 0 as the rate of this clk until we can 478 * properly recalc the rate based on the parent's rate. 
479 */ 480 return 0; 481 } 482 483 unsigned long clk_hw_get_rate(const struct clk_hw *hw) 484 { 485 return clk_core_get_rate_nolock(hw->core); 486 } 487 EXPORT_SYMBOL_GPL(clk_hw_get_rate); 488 489 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) 490 { 491 if (!core) 492 return 0; 493 494 return core->accuracy; 495 } 496 497 unsigned long clk_hw_get_flags(const struct clk_hw *hw) 498 { 499 return hw->core->flags; 500 } 501 EXPORT_SYMBOL_GPL(clk_hw_get_flags); 502 503 bool clk_hw_is_prepared(const struct clk_hw *hw) 504 { 505 return clk_core_is_prepared(hw->core); 506 } 507 EXPORT_SYMBOL_GPL(clk_hw_is_prepared); 508 509 bool clk_hw_rate_is_protected(const struct clk_hw *hw) 510 { 511 return clk_core_rate_is_protected(hw->core); 512 } 513 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); 514 515 bool clk_hw_is_enabled(const struct clk_hw *hw) 516 { 517 return clk_core_is_enabled(hw->core); 518 } 519 EXPORT_SYMBOL_GPL(clk_hw_is_enabled); 520 521 bool __clk_is_enabled(struct clk *clk) 522 { 523 if (!clk) 524 return false; 525 526 return clk_core_is_enabled(clk->core); 527 } 528 EXPORT_SYMBOL_GPL(__clk_is_enabled); 529 530 static bool mux_is_better_rate(unsigned long rate, unsigned long now, 531 unsigned long best, unsigned long flags) 532 { 533 if (flags & CLK_MUX_ROUND_CLOSEST) 534 return abs(now - rate) < abs(best - rate); 535 536 return now <= rate && now > best; 537 } 538 539 static void clk_core_init_rate_req(struct clk_core * const core, 540 struct clk_rate_request *req, 541 unsigned long rate); 542 543 static int clk_core_round_rate_nolock(struct clk_core *core, 544 struct clk_rate_request *req); 545 546 static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent) 547 { 548 struct clk_core *tmp; 549 unsigned int i; 550 551 /* Optimize for the case where the parent is already the parent. 
*/ 552 if (core->parent == parent) 553 return true; 554 555 for (i = 0; i < core->num_parents; i++) { 556 tmp = clk_core_get_parent_by_index(core, i); 557 if (!tmp) 558 continue; 559 560 if (tmp == parent) 561 return true; 562 } 563 564 return false; 565 } 566 567 static void 568 clk_core_forward_rate_req(struct clk_core *core, 569 const struct clk_rate_request *old_req, 570 struct clk_core *parent, 571 struct clk_rate_request *req, 572 unsigned long parent_rate) 573 { 574 if (WARN_ON(!clk_core_has_parent(core, parent))) 575 return; 576 577 clk_core_init_rate_req(parent, req, parent_rate); 578 579 if (req->min_rate < old_req->min_rate) 580 req->min_rate = old_req->min_rate; 581 582 if (req->max_rate > old_req->max_rate) 583 req->max_rate = old_req->max_rate; 584 } 585 586 int clk_mux_determine_rate_flags(struct clk_hw *hw, 587 struct clk_rate_request *req, 588 unsigned long flags) 589 { 590 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 591 int i, num_parents, ret; 592 unsigned long best = 0; 593 594 /* if NO_REPARENT flag set, pass through to current parent */ 595 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 596 parent = core->parent; 597 if (core->flags & CLK_SET_RATE_PARENT) { 598 struct clk_rate_request parent_req; 599 600 if (!parent) { 601 req->rate = 0; 602 return 0; 603 } 604 605 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); 606 ret = clk_core_round_rate_nolock(parent, &parent_req); 607 if (ret) 608 return ret; 609 610 best = parent_req.rate; 611 } else if (parent) { 612 best = clk_core_get_rate_nolock(parent); 613 } else { 614 best = clk_core_get_rate_nolock(core); 615 } 616 617 goto out; 618 } 619 620 /* find the parent that can provide the fastest rate <= rate */ 621 num_parents = core->num_parents; 622 for (i = 0; i < num_parents; i++) { 623 unsigned long parent_rate; 624 625 parent = clk_core_get_parent_by_index(core, i); 626 if (!parent) 627 continue; 628 629 if (core->flags & CLK_SET_RATE_PARENT) { 630 struct clk_rate_request parent_req; 631 632 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); 633 ret = clk_core_round_rate_nolock(parent, &parent_req); 634 if (ret) 635 continue; 636 637 parent_rate = parent_req.rate; 638 } else { 639 parent_rate = clk_core_get_rate_nolock(parent); 640 } 641 642 if (mux_is_better_rate(req->rate, parent_rate, 643 best, flags)) { 644 best_parent = parent; 645 best = parent_rate; 646 } 647 } 648 649 if (!best_parent) 650 return -EINVAL; 651 652 out: 653 if (best_parent) 654 req->best_parent_hw = best_parent->hw; 655 req->best_parent_rate = best; 656 req->rate = best; 657 658 return 0; 659 } 660 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); 661 662 struct clk *__clk_lookup(const char *name) 663 { 664 struct clk_core *core = clk_core_lookup(name); 665 666 return !core ? 
NULL : core->hw->clk; 667 } 668 669 static void clk_core_get_boundaries(struct clk_core *core, 670 unsigned long *min_rate, 671 unsigned long *max_rate) 672 { 673 struct clk *clk_user; 674 675 lockdep_assert_held(&prepare_lock); 676 677 *min_rate = core->min_rate; 678 *max_rate = core->max_rate; 679 680 hlist_for_each_entry(clk_user, &core->clks, clks_node) 681 *min_rate = max(*min_rate, clk_user->min_rate); 682 683 hlist_for_each_entry(clk_user, &core->clks, clks_node) 684 *max_rate = min(*max_rate, clk_user->max_rate); 685 } 686 687 /* 688 * clk_hw_get_rate_range() - returns the clock rate range for a hw clk 689 * @hw: the hw clk we want to get the range from 690 * @min_rate: pointer to the variable that will hold the minimum 691 * @max_rate: pointer to the variable that will hold the maximum 692 * 693 * Fills the @min_rate and @max_rate variables with the minimum and 694 * maximum that clock can reach. 695 */ 696 void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate, 697 unsigned long *max_rate) 698 { 699 clk_core_get_boundaries(hw->core, min_rate, max_rate); 700 } 701 EXPORT_SYMBOL_GPL(clk_hw_get_rate_range); 702 703 static bool clk_core_check_boundaries(struct clk_core *core, 704 unsigned long min_rate, 705 unsigned long max_rate) 706 { 707 struct clk *user; 708 709 lockdep_assert_held(&prepare_lock); 710 711 if (min_rate > core->max_rate || max_rate < core->min_rate) 712 return false; 713 714 hlist_for_each_entry(user, &core->clks, clks_node) 715 if (min_rate > user->max_rate || max_rate < user->min_rate) 716 return false; 717 718 return true; 719 } 720 721 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, 722 unsigned long max_rate) 723 { 724 hw->core->min_rate = min_rate; 725 hw->core->max_rate = max_rate; 726 } 727 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); 728 729 /* 730 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk 731 * @hw: mux type clk to determine rate on 732 * @req: rate request, also used to return preferred parent and frequencies 733 * 734 * Helper for finding best parent to provide a given frequency. This can be used 735 * directly as a determine_rate callback (e.g. for a mux), or from a more 736 * complex clock that may combine a mux with other operations. 
737 * 738 * Returns: 0 on success, -EERROR value on error 739 */ 740 int __clk_mux_determine_rate(struct clk_hw *hw, 741 struct clk_rate_request *req) 742 { 743 return clk_mux_determine_rate_flags(hw, req, 0); 744 } 745 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 746 747 int __clk_mux_determine_rate_closest(struct clk_hw *hw, 748 struct clk_rate_request *req) 749 { 750 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); 751 } 752 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); 753 754 /*** clk api ***/ 755 756 static void clk_core_rate_unprotect(struct clk_core *core) 757 { 758 lockdep_assert_held(&prepare_lock); 759 760 if (!core) 761 return; 762 763 if (WARN(core->protect_count == 0, 764 "%s already unprotected\n", core->name)) 765 return; 766 767 if (--core->protect_count > 0) 768 return; 769 770 clk_core_rate_unprotect(core->parent); 771 } 772 773 static int clk_core_rate_nuke_protect(struct clk_core *core) 774 { 775 int ret; 776 777 lockdep_assert_held(&prepare_lock); 778 779 if (!core) 780 return -EINVAL; 781 782 if (core->protect_count == 0) 783 return 0; 784 785 ret = core->protect_count; 786 core->protect_count = 1; 787 clk_core_rate_unprotect(core); 788 789 return ret; 790 } 791 792 /** 793 * clk_rate_exclusive_put - release exclusivity over clock rate control 794 * @clk: the clk over which the exclusivity is released 795 * 796 * clk_rate_exclusive_put() completes a critical section during which a clock 797 * consumer cannot tolerate any other consumer making any operation on the 798 * clock which could result in a rate change or rate glitch. Exclusive clocks 799 * cannot have their rate changed, either directly or indirectly due to changes 800 * further up the parent chain of clocks. As a result, clocks up parent chain 801 * also get under exclusive control of the calling consumer. 802 * 803 * If exlusivity is claimed more than once on clock, even by the same consumer, 804 * the rate effectively gets locked as exclusivity can't be preempted. 805 * 806 * Calls to clk_rate_exclusive_put() must be balanced with calls to 807 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return 808 * error status. 
809 */ 810 void clk_rate_exclusive_put(struct clk *clk) 811 { 812 if (!clk) 813 return; 814 815 clk_prepare_lock(); 816 817 /* 818 * if there is something wrong with this consumer protect count, stop 819 * here before messing with the provider 820 */ 821 if (WARN_ON(clk->exclusive_count <= 0)) 822 goto out; 823 824 clk_core_rate_unprotect(clk->core); 825 clk->exclusive_count--; 826 out: 827 clk_prepare_unlock(); 828 } 829 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); 830 831 static void clk_core_rate_protect(struct clk_core *core) 832 { 833 lockdep_assert_held(&prepare_lock); 834 835 if (!core) 836 return; 837 838 if (core->protect_count == 0) 839 clk_core_rate_protect(core->parent); 840 841 core->protect_count++; 842 } 843 844 static void clk_core_rate_restore_protect(struct clk_core *core, int count) 845 { 846 lockdep_assert_held(&prepare_lock); 847 848 if (!core) 849 return; 850 851 if (count == 0) 852 return; 853 854 clk_core_rate_protect(core); 855 core->protect_count = count; 856 } 857 858 /** 859 * clk_rate_exclusive_get - get exclusivity over the clk rate control 860 * @clk: the clk over which the exclusity of rate control is requested 861 * 862 * clk_rate_exclusive_get() begins a critical section during which a clock 863 * consumer cannot tolerate any other consumer making any operation on the 864 * clock which could result in a rate change or rate glitch. Exclusive clocks 865 * cannot have their rate changed, either directly or indirectly due to changes 866 * further up the parent chain of clocks. As a result, clocks up parent chain 867 * also get under exclusive control of the calling consumer. 868 * 869 * If exlusivity is claimed more than once on clock, even by the same consumer, 870 * the rate effectively gets locked as exclusivity can't be preempted. 871 * 872 * Calls to clk_rate_exclusive_get() should be balanced with calls to 873 * clk_rate_exclusive_put(). Calls to this function may sleep. 874 * Returns 0 on success, -EERROR otherwise 875 */ 876 int clk_rate_exclusive_get(struct clk *clk) 877 { 878 if (!clk) 879 return 0; 880 881 clk_prepare_lock(); 882 clk_core_rate_protect(clk->core); 883 clk->exclusive_count++; 884 clk_prepare_unlock(); 885 886 return 0; 887 } 888 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); 889 890 static void clk_core_unprepare(struct clk_core *core) 891 { 892 lockdep_assert_held(&prepare_lock); 893 894 if (!core) 895 return; 896 897 if (WARN(core->prepare_count == 0, 898 "%s already unprepared\n", core->name)) 899 return; 900 901 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, 902 "Unpreparing critical %s\n", core->name)) 903 return; 904 905 if (core->flags & CLK_SET_RATE_GATE) 906 clk_core_rate_unprotect(core); 907 908 if (--core->prepare_count > 0) 909 return; 910 911 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name); 912 913 trace_clk_unprepare(core); 914 915 if (core->ops->unprepare) 916 core->ops->unprepare(core->hw); 917 918 trace_clk_unprepare_complete(core); 919 clk_core_unprepare(core->parent); 920 clk_pm_runtime_put(core); 921 } 922 923 static void clk_core_unprepare_lock(struct clk_core *core) 924 { 925 clk_prepare_lock(); 926 clk_core_unprepare(core); 927 clk_prepare_unlock(); 928 } 929 930 /** 931 * clk_unprepare - undo preparation of a clock source 932 * @clk: the clk being unprepared 933 * 934 * clk_unprepare may sleep, which differentiates it from clk_disable. In a 935 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk 936 * if the operation may sleep. 
One example is a clk which is accessed over 937 * I2c. In the complex case a clk gate operation may require a fast and a slow 938 * part. It is this reason that clk_unprepare and clk_disable are not mutually 939 * exclusive. In fact clk_disable must be called before clk_unprepare. 940 */ 941 void clk_unprepare(struct clk *clk) 942 { 943 if (IS_ERR_OR_NULL(clk)) 944 return; 945 946 clk_core_unprepare_lock(clk->core); 947 } 948 EXPORT_SYMBOL_GPL(clk_unprepare); 949 950 static int clk_core_prepare(struct clk_core *core) 951 { 952 int ret = 0; 953 954 lockdep_assert_held(&prepare_lock); 955 956 if (!core) 957 return 0; 958 959 if (core->prepare_count == 0) { 960 ret = clk_pm_runtime_get(core); 961 if (ret) 962 return ret; 963 964 ret = clk_core_prepare(core->parent); 965 if (ret) 966 goto runtime_put; 967 968 trace_clk_prepare(core); 969 970 if (core->ops->prepare) 971 ret = core->ops->prepare(core->hw); 972 973 trace_clk_prepare_complete(core); 974 975 if (ret) 976 goto unprepare; 977 } 978 979 core->prepare_count++; 980 981 /* 982 * CLK_SET_RATE_GATE is a special case of clock protection 983 * Instead of a consumer claiming exclusive rate control, it is 984 * actually the provider which prevents any consumer from making any 985 * operation which could result in a rate change or rate glitch while 986 * the clock is prepared. 987 */ 988 if (core->flags & CLK_SET_RATE_GATE) 989 clk_core_rate_protect(core); 990 991 return 0; 992 unprepare: 993 clk_core_unprepare(core->parent); 994 runtime_put: 995 clk_pm_runtime_put(core); 996 return ret; 997 } 998 999 static int clk_core_prepare_lock(struct clk_core *core) 1000 { 1001 int ret; 1002 1003 clk_prepare_lock(); 1004 ret = clk_core_prepare(core); 1005 clk_prepare_unlock(); 1006 1007 return ret; 1008 } 1009 1010 /** 1011 * clk_prepare - prepare a clock source 1012 * @clk: the clk being prepared 1013 * 1014 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple 1015 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the 1016 * operation may sleep. One example is a clk which is accessed over I2c. In 1017 * the complex case a clk ungate operation may require a fast and a slow part. 1018 * It is this reason that clk_prepare and clk_enable are not mutually 1019 * exclusive. In fact clk_prepare must be called before clk_enable. 1020 * Returns 0 on success, -EERROR otherwise. 
1021 */ 1022 int clk_prepare(struct clk *clk) 1023 { 1024 if (!clk) 1025 return 0; 1026 1027 return clk_core_prepare_lock(clk->core); 1028 } 1029 EXPORT_SYMBOL_GPL(clk_prepare); 1030 1031 static void clk_core_disable(struct clk_core *core) 1032 { 1033 lockdep_assert_held(&enable_lock); 1034 1035 if (!core) 1036 return; 1037 1038 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) 1039 return; 1040 1041 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, 1042 "Disabling critical %s\n", core->name)) 1043 return; 1044 1045 if (--core->enable_count > 0) 1046 return; 1047 1048 trace_clk_disable_rcuidle(core); 1049 1050 if (core->ops->disable) 1051 core->ops->disable(core->hw); 1052 1053 trace_clk_disable_complete_rcuidle(core); 1054 1055 clk_core_disable(core->parent); 1056 } 1057 1058 static void clk_core_disable_lock(struct clk_core *core) 1059 { 1060 unsigned long flags; 1061 1062 flags = clk_enable_lock(); 1063 clk_core_disable(core); 1064 clk_enable_unlock(flags); 1065 } 1066 1067 /** 1068 * clk_disable - gate a clock 1069 * @clk: the clk being gated 1070 * 1071 * clk_disable must not sleep, which differentiates it from clk_unprepare. In 1072 * a simple case, clk_disable can be used instead of clk_unprepare to gate a 1073 * clk if the operation is fast and will never sleep. One example is a 1074 * SoC-internal clk which is controlled via simple register writes. In the 1075 * complex case a clk gate operation may require a fast and a slow part. It is 1076 * this reason that clk_unprepare and clk_disable are not mutually exclusive. 1077 * In fact clk_disable must be called before clk_unprepare. 1078 */ 1079 void clk_disable(struct clk *clk) 1080 { 1081 if (IS_ERR_OR_NULL(clk)) 1082 return; 1083 1084 clk_core_disable_lock(clk->core); 1085 } 1086 EXPORT_SYMBOL_GPL(clk_disable); 1087 1088 static int clk_core_enable(struct clk_core *core) 1089 { 1090 int ret = 0; 1091 1092 lockdep_assert_held(&enable_lock); 1093 1094 if (!core) 1095 return 0; 1096 1097 if (WARN(core->prepare_count == 0, 1098 "Enabling unprepared %s\n", core->name)) 1099 return -ESHUTDOWN; 1100 1101 if (core->enable_count == 0) { 1102 ret = clk_core_enable(core->parent); 1103 1104 if (ret) 1105 return ret; 1106 1107 trace_clk_enable_rcuidle(core); 1108 1109 if (core->ops->enable) 1110 ret = core->ops->enable(core->hw); 1111 1112 trace_clk_enable_complete_rcuidle(core); 1113 1114 if (ret) { 1115 clk_core_disable(core->parent); 1116 return ret; 1117 } 1118 } 1119 1120 core->enable_count++; 1121 return 0; 1122 } 1123 1124 static int clk_core_enable_lock(struct clk_core *core) 1125 { 1126 unsigned long flags; 1127 int ret; 1128 1129 flags = clk_enable_lock(); 1130 ret = clk_core_enable(core); 1131 clk_enable_unlock(flags); 1132 1133 return ret; 1134 } 1135 1136 /** 1137 * clk_gate_restore_context - restore context for poweroff 1138 * @hw: the clk_hw pointer of clock whose state is to be restored 1139 * 1140 * The clock gate restore context function enables or disables 1141 * the gate clocks based on the enable_count. This is done in cases 1142 * where the clock context is lost and based on the enable_count 1143 * the clock either needs to be enabled/disabled. This 1144 * helps restore the state of gate clocks. 
1145 */ 1146 void clk_gate_restore_context(struct clk_hw *hw) 1147 { 1148 struct clk_core *core = hw->core; 1149 1150 if (core->enable_count) 1151 core->ops->enable(hw); 1152 else 1153 core->ops->disable(hw); 1154 } 1155 EXPORT_SYMBOL_GPL(clk_gate_restore_context); 1156 1157 static int clk_core_save_context(struct clk_core *core) 1158 { 1159 struct clk_core *child; 1160 int ret = 0; 1161 1162 hlist_for_each_entry(child, &core->children, child_node) { 1163 ret = clk_core_save_context(child); 1164 if (ret < 0) 1165 return ret; 1166 } 1167 1168 if (core->ops && core->ops->save_context) 1169 ret = core->ops->save_context(core->hw); 1170 1171 return ret; 1172 } 1173 1174 static void clk_core_restore_context(struct clk_core *core) 1175 { 1176 struct clk_core *child; 1177 1178 if (core->ops && core->ops->restore_context) 1179 core->ops->restore_context(core->hw); 1180 1181 hlist_for_each_entry(child, &core->children, child_node) 1182 clk_core_restore_context(child); 1183 } 1184 1185 /** 1186 * clk_save_context - save clock context for poweroff 1187 * 1188 * Saves the context of the clock register for powerstates in which the 1189 * contents of the registers will be lost. Occurs deep within the suspend 1190 * code. Returns 0 on success. 1191 */ 1192 int clk_save_context(void) 1193 { 1194 struct clk_core *clk; 1195 int ret; 1196 1197 hlist_for_each_entry(clk, &clk_root_list, child_node) { 1198 ret = clk_core_save_context(clk); 1199 if (ret < 0) 1200 return ret; 1201 } 1202 1203 hlist_for_each_entry(clk, &clk_orphan_list, child_node) { 1204 ret = clk_core_save_context(clk); 1205 if (ret < 0) 1206 return ret; 1207 } 1208 1209 return 0; 1210 } 1211 EXPORT_SYMBOL_GPL(clk_save_context); 1212 1213 /** 1214 * clk_restore_context - restore clock context after poweroff 1215 * 1216 * Restore the saved clock context upon resume. 1217 * 1218 */ 1219 void clk_restore_context(void) 1220 { 1221 struct clk_core *core; 1222 1223 hlist_for_each_entry(core, &clk_root_list, child_node) 1224 clk_core_restore_context(core); 1225 1226 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1227 clk_core_restore_context(core); 1228 } 1229 EXPORT_SYMBOL_GPL(clk_restore_context); 1230 1231 /** 1232 * clk_enable - ungate a clock 1233 * @clk: the clk being ungated 1234 * 1235 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 1236 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 1237 * if the operation will never sleep. One example is a SoC-internal clk which 1238 * is controlled via simple register writes. In the complex case a clk ungate 1239 * operation may require a fast and a slow part. It is this reason that 1240 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 1241 * must be called before clk_enable. Returns 0 on success, -EERROR 1242 * otherwise. 1243 */ 1244 int clk_enable(struct clk *clk) 1245 { 1246 if (!clk) 1247 return 0; 1248 1249 return clk_core_enable_lock(clk->core); 1250 } 1251 EXPORT_SYMBOL_GPL(clk_enable); 1252 1253 /** 1254 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 1255 * @clk: clock source 1256 * 1257 * Returns true if clk_prepare() implicitly enables the clock, effectively 1258 * making clk_enable()/clk_disable() no-ops, false otherwise. 1259 * 1260 * This is of interest mainly to power management code where actually 1261 * disabling the clock also requires unpreparing it to have any material 1262 * effect. 
1263 * 1264 * Regardless of the value returned here, the caller must always invoke 1265 * clk_enable() or clk_prepare_enable() and counterparts for usage counts 1266 * to be right. 1267 */ 1268 bool clk_is_enabled_when_prepared(struct clk *clk) 1269 { 1270 return clk && !(clk->core->ops->enable && clk->core->ops->disable); 1271 } 1272 EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared); 1273 1274 static int clk_core_prepare_enable(struct clk_core *core) 1275 { 1276 int ret; 1277 1278 ret = clk_core_prepare_lock(core); 1279 if (ret) 1280 return ret; 1281 1282 ret = clk_core_enable_lock(core); 1283 if (ret) 1284 clk_core_unprepare_lock(core); 1285 1286 return ret; 1287 } 1288 1289 static void clk_core_disable_unprepare(struct clk_core *core) 1290 { 1291 clk_core_disable_lock(core); 1292 clk_core_unprepare_lock(core); 1293 } 1294 1295 static void __init clk_unprepare_unused_subtree(struct clk_core *core) 1296 { 1297 struct clk_core *child; 1298 1299 lockdep_assert_held(&prepare_lock); 1300 1301 hlist_for_each_entry(child, &core->children, child_node) 1302 clk_unprepare_unused_subtree(child); 1303 1304 if (core->prepare_count) 1305 return; 1306 1307 if (core->flags & CLK_IGNORE_UNUSED) 1308 return; 1309 1310 if (clk_pm_runtime_get(core)) 1311 return; 1312 1313 if (clk_core_is_prepared(core)) { 1314 trace_clk_unprepare(core); 1315 if (core->ops->unprepare_unused) 1316 core->ops->unprepare_unused(core->hw); 1317 else if (core->ops->unprepare) 1318 core->ops->unprepare(core->hw); 1319 trace_clk_unprepare_complete(core); 1320 } 1321 1322 clk_pm_runtime_put(core); 1323 } 1324 1325 static void __init clk_disable_unused_subtree(struct clk_core *core) 1326 { 1327 struct clk_core *child; 1328 unsigned long flags; 1329 1330 lockdep_assert_held(&prepare_lock); 1331 1332 hlist_for_each_entry(child, &core->children, child_node) 1333 clk_disable_unused_subtree(child); 1334 1335 if (core->flags & CLK_OPS_PARENT_ENABLE) 1336 clk_core_prepare_enable(core->parent); 1337 1338 if (clk_pm_runtime_get(core)) 1339 goto unprepare_out; 1340 1341 flags = clk_enable_lock(); 1342 1343 if (core->enable_count) 1344 goto unlock_out; 1345 1346 if (core->flags & CLK_IGNORE_UNUSED) 1347 goto unlock_out; 1348 1349 /* 1350 * some gate clocks have special needs during the disable-unused 1351 * sequence. 
call .disable_unused if available, otherwise fall 1352 * back to .disable 1353 */ 1354 if (clk_core_is_enabled(core)) { 1355 trace_clk_disable(core); 1356 if (core->ops->disable_unused) 1357 core->ops->disable_unused(core->hw); 1358 else if (core->ops->disable) 1359 core->ops->disable(core->hw); 1360 trace_clk_disable_complete(core); 1361 } 1362 1363 unlock_out: 1364 clk_enable_unlock(flags); 1365 clk_pm_runtime_put(core); 1366 unprepare_out: 1367 if (core->flags & CLK_OPS_PARENT_ENABLE) 1368 clk_core_disable_unprepare(core->parent); 1369 } 1370 1371 static bool clk_ignore_unused __initdata; 1372 static int __init clk_ignore_unused_setup(char *__unused) 1373 { 1374 clk_ignore_unused = true; 1375 return 1; 1376 } 1377 __setup("clk_ignore_unused", clk_ignore_unused_setup); 1378 1379 static int __init clk_disable_unused(void) 1380 { 1381 struct clk_core *core; 1382 1383 if (clk_ignore_unused) { 1384 pr_warn("clk: Not disabling unused clocks\n"); 1385 return 0; 1386 } 1387 1388 clk_prepare_lock(); 1389 1390 hlist_for_each_entry(core, &clk_root_list, child_node) 1391 clk_disable_unused_subtree(core); 1392 1393 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1394 clk_disable_unused_subtree(core); 1395 1396 hlist_for_each_entry(core, &clk_root_list, child_node) 1397 clk_unprepare_unused_subtree(core); 1398 1399 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1400 clk_unprepare_unused_subtree(core); 1401 1402 clk_prepare_unlock(); 1403 1404 return 0; 1405 } 1406 late_initcall_sync(clk_disable_unused); 1407 1408 static int clk_core_determine_round_nolock(struct clk_core *core, 1409 struct clk_rate_request *req) 1410 { 1411 long rate; 1412 1413 lockdep_assert_held(&prepare_lock); 1414 1415 if (!core) 1416 return 0; 1417 1418 /* 1419 * Some clock providers hand-craft their clk_rate_requests and 1420 * might not fill min_rate and max_rate. 1421 * 1422 * If it's the case, clamping the rate is equivalent to setting 1423 * the rate to 0 which is bad. Skip the clamping but complain so 1424 * that it gets fixed, hopefully. 
1425 */ 1426 if (!req->min_rate && !req->max_rate) 1427 pr_warn("%s: %s: clk_rate_request has initialized min or max rate.\n", 1428 __func__, core->name); 1429 else 1430 req->rate = clamp(req->rate, req->min_rate, req->max_rate); 1431 1432 /* 1433 * At this point, core protection will be disabled 1434 * - if the provider is not protected at all 1435 * - if the calling consumer is the only one which has exclusivity 1436 * over the provider 1437 */ 1438 if (clk_core_rate_is_protected(core)) { 1439 req->rate = core->rate; 1440 } else if (core->ops->determine_rate) { 1441 return core->ops->determine_rate(core->hw, req); 1442 } else if (core->ops->round_rate) { 1443 rate = core->ops->round_rate(core->hw, req->rate, 1444 &req->best_parent_rate); 1445 if (rate < 0) 1446 return rate; 1447 1448 req->rate = rate; 1449 } else { 1450 return -EINVAL; 1451 } 1452 1453 return 0; 1454 } 1455 1456 static void clk_core_init_rate_req(struct clk_core * const core, 1457 struct clk_rate_request *req, 1458 unsigned long rate) 1459 { 1460 struct clk_core *parent; 1461 1462 if (WARN_ON(!core || !req)) 1463 return; 1464 1465 memset(req, 0, sizeof(*req)); 1466 1467 req->rate = rate; 1468 clk_core_get_boundaries(core, &req->min_rate, &req->max_rate); 1469 1470 parent = core->parent; 1471 if (parent) { 1472 req->best_parent_hw = parent->hw; 1473 req->best_parent_rate = parent->rate; 1474 } else { 1475 req->best_parent_hw = NULL; 1476 req->best_parent_rate = 0; 1477 } 1478 } 1479 1480 /** 1481 * clk_hw_init_rate_request - Initializes a clk_rate_request 1482 * @hw: the clk for which we want to submit a rate request 1483 * @req: the clk_rate_request structure we want to initialise 1484 * @rate: the rate which is to be requested 1485 * 1486 * Initializes a clk_rate_request structure to submit to 1487 * __clk_determine_rate() or similar functions. 1488 */ 1489 void clk_hw_init_rate_request(const struct clk_hw *hw, 1490 struct clk_rate_request *req, 1491 unsigned long rate) 1492 { 1493 if (WARN_ON(!hw || !req)) 1494 return; 1495 1496 clk_core_init_rate_req(hw->core, req, rate); 1497 } 1498 EXPORT_SYMBOL_GPL(clk_hw_init_rate_request); 1499 1500 /** 1501 * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent 1502 * @hw: the original clock that got the rate request 1503 * @old_req: the original clk_rate_request structure we want to forward 1504 * @parent: the clk we want to forward @old_req to 1505 * @req: the clk_rate_request structure we want to initialise 1506 * @parent_rate: The rate which is to be requested to @parent 1507 * 1508 * Initializes a clk_rate_request structure to submit to a clock parent 1509 * in __clk_determine_rate() or similar functions. 
1510 */ 1511 void clk_hw_forward_rate_request(const struct clk_hw *hw, 1512 const struct clk_rate_request *old_req, 1513 const struct clk_hw *parent, 1514 struct clk_rate_request *req, 1515 unsigned long parent_rate) 1516 { 1517 if (WARN_ON(!hw || !old_req || !parent || !req)) 1518 return; 1519 1520 clk_core_forward_rate_req(hw->core, old_req, 1521 parent->core, req, 1522 parent_rate); 1523 } 1524 1525 static bool clk_core_can_round(struct clk_core * const core) 1526 { 1527 return core->ops->determine_rate || core->ops->round_rate; 1528 } 1529 1530 static int clk_core_round_rate_nolock(struct clk_core *core, 1531 struct clk_rate_request *req) 1532 { 1533 int ret; 1534 1535 lockdep_assert_held(&prepare_lock); 1536 1537 if (!core) { 1538 req->rate = 0; 1539 return 0; 1540 } 1541 1542 if (clk_core_can_round(core)) 1543 return clk_core_determine_round_nolock(core, req); 1544 1545 if (core->flags & CLK_SET_RATE_PARENT) { 1546 struct clk_rate_request parent_req; 1547 1548 clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate); 1549 ret = clk_core_round_rate_nolock(core->parent, &parent_req); 1550 if (ret) 1551 return ret; 1552 1553 req->best_parent_rate = parent_req.rate; 1554 req->rate = parent_req.rate; 1555 1556 return 0; 1557 } 1558 1559 req->rate = core->rate; 1560 return 0; 1561 } 1562 1563 /** 1564 * __clk_determine_rate - get the closest rate actually supported by a clock 1565 * @hw: determine the rate of this clock 1566 * @req: target rate request 1567 * 1568 * Useful for clk_ops such as .set_rate and .determine_rate. 1569 */ 1570 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 1571 { 1572 if (!hw) { 1573 req->rate = 0; 1574 return 0; 1575 } 1576 1577 return clk_core_round_rate_nolock(hw->core, req); 1578 } 1579 EXPORT_SYMBOL_GPL(__clk_determine_rate); 1580 1581 /** 1582 * clk_hw_round_rate() - round the given rate for a hw clk 1583 * @hw: the hw clk for which we are rounding a rate 1584 * @rate: the rate which is to be rounded 1585 * 1586 * Takes in a rate as input and rounds it to a rate that the clk can actually 1587 * use. 1588 * 1589 * Context: prepare_lock must be held. 1590 * For clk providers to call from within clk_ops such as .round_rate, 1591 * .determine_rate. 1592 * 1593 * Return: returns rounded rate of hw clk if clk supports round_rate operation 1594 * else returns the parent rate. 1595 */ 1596 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) 1597 { 1598 int ret; 1599 struct clk_rate_request req; 1600 1601 clk_core_init_rate_req(hw->core, &req, rate); 1602 1603 ret = clk_core_round_rate_nolock(hw->core, &req); 1604 if (ret) 1605 return 0; 1606 1607 return req.rate; 1608 } 1609 EXPORT_SYMBOL_GPL(clk_hw_round_rate); 1610 1611 /** 1612 * clk_round_rate - round the given rate for a clk 1613 * @clk: the clk for which we are rounding a rate 1614 * @rate: the rate which is to be rounded 1615 * 1616 * Takes in a rate as input and rounds it to a rate that the clk can actually 1617 * use which is then returned. If clk doesn't support round_rate operation 1618 * then the parent rate is returned. 
1619 */ 1620 long clk_round_rate(struct clk *clk, unsigned long rate) 1621 { 1622 struct clk_rate_request req; 1623 int ret; 1624 1625 if (!clk) 1626 return 0; 1627 1628 clk_prepare_lock(); 1629 1630 if (clk->exclusive_count) 1631 clk_core_rate_unprotect(clk->core); 1632 1633 clk_core_init_rate_req(clk->core, &req, rate); 1634 1635 ret = clk_core_round_rate_nolock(clk->core, &req); 1636 1637 if (clk->exclusive_count) 1638 clk_core_rate_protect(clk->core); 1639 1640 clk_prepare_unlock(); 1641 1642 if (ret) 1643 return ret; 1644 1645 return req.rate; 1646 } 1647 EXPORT_SYMBOL_GPL(clk_round_rate); 1648 1649 /** 1650 * __clk_notify - call clk notifier chain 1651 * @core: clk that is changing rate 1652 * @msg: clk notifier type (see include/linux/clk.h) 1653 * @old_rate: old clk rate 1654 * @new_rate: new clk rate 1655 * 1656 * Triggers a notifier call chain on the clk rate-change notification 1657 * for 'clk'. Passes a pointer to the struct clk and the previous 1658 * and current rates to the notifier callback. Intended to be called by 1659 * internal clock code only. Returns NOTIFY_DONE from the last driver 1660 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1661 * a driver returns that. 1662 */ 1663 static int __clk_notify(struct clk_core *core, unsigned long msg, 1664 unsigned long old_rate, unsigned long new_rate) 1665 { 1666 struct clk_notifier *cn; 1667 struct clk_notifier_data cnd; 1668 int ret = NOTIFY_DONE; 1669 1670 cnd.old_rate = old_rate; 1671 cnd.new_rate = new_rate; 1672 1673 list_for_each_entry(cn, &clk_notifier_list, node) { 1674 if (cn->clk->core == core) { 1675 cnd.clk = cn->clk; 1676 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1677 &cnd); 1678 if (ret & NOTIFY_STOP_MASK) 1679 return ret; 1680 } 1681 } 1682 1683 return ret; 1684 } 1685 1686 /** 1687 * __clk_recalc_accuracies 1688 * @core: first clk in the subtree 1689 * 1690 * Walks the subtree of clks starting with clk and recalculates accuracies as 1691 * it goes. Note that if a clk does not implement the .recalc_accuracy 1692 * callback then it is assumed that the clock will take on the accuracy of its 1693 * parent. 1694 */ 1695 static void __clk_recalc_accuracies(struct clk_core *core) 1696 { 1697 unsigned long parent_accuracy = 0; 1698 struct clk_core *child; 1699 1700 lockdep_assert_held(&prepare_lock); 1701 1702 if (core->parent) 1703 parent_accuracy = core->parent->accuracy; 1704 1705 if (core->ops->recalc_accuracy) 1706 core->accuracy = core->ops->recalc_accuracy(core->hw, 1707 parent_accuracy); 1708 else 1709 core->accuracy = parent_accuracy; 1710 1711 hlist_for_each_entry(child, &core->children, child_node) 1712 __clk_recalc_accuracies(child); 1713 } 1714 1715 static long clk_core_get_accuracy_recalc(struct clk_core *core) 1716 { 1717 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) 1718 __clk_recalc_accuracies(core); 1719 1720 return clk_core_get_accuracy_no_lock(core); 1721 } 1722 1723 /** 1724 * clk_get_accuracy - return the accuracy of clk 1725 * @clk: the clk whose accuracy is being returned 1726 * 1727 * Simply returns the cached accuracy of the clk, unless 1728 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1729 * issued. 1730 * If clk is NULL then returns 0. 
1731 */ 1732 long clk_get_accuracy(struct clk *clk) 1733 { 1734 long accuracy; 1735 1736 if (!clk) 1737 return 0; 1738 1739 clk_prepare_lock(); 1740 accuracy = clk_core_get_accuracy_recalc(clk->core); 1741 clk_prepare_unlock(); 1742 1743 return accuracy; 1744 } 1745 EXPORT_SYMBOL_GPL(clk_get_accuracy); 1746 1747 static unsigned long clk_recalc(struct clk_core *core, 1748 unsigned long parent_rate) 1749 { 1750 unsigned long rate = parent_rate; 1751 1752 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { 1753 rate = core->ops->recalc_rate(core->hw, parent_rate); 1754 clk_pm_runtime_put(core); 1755 } 1756 return rate; 1757 } 1758 1759 /** 1760 * __clk_recalc_rates 1761 * @core: first clk in the subtree 1762 * @update_req: Whether req_rate should be updated with the new rate 1763 * @msg: notification type (see include/linux/clk.h) 1764 * 1765 * Walks the subtree of clks starting with clk and recalculates rates as it 1766 * goes. Note that if a clk does not implement the .recalc_rate callback then 1767 * it is assumed that the clock will take on the rate of its parent. 1768 * 1769 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1770 * if necessary. 1771 */ 1772 static void __clk_recalc_rates(struct clk_core *core, bool update_req, 1773 unsigned long msg) 1774 { 1775 unsigned long old_rate; 1776 unsigned long parent_rate = 0; 1777 struct clk_core *child; 1778 1779 lockdep_assert_held(&prepare_lock); 1780 1781 old_rate = core->rate; 1782 1783 if (core->parent) 1784 parent_rate = core->parent->rate; 1785 1786 core->rate = clk_recalc(core, parent_rate); 1787 if (update_req) 1788 core->req_rate = core->rate; 1789 1790 /* 1791 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1792 * & ABORT_RATE_CHANGE notifiers 1793 */ 1794 if (core->notifier_count && msg) 1795 __clk_notify(core, msg, old_rate, core->rate); 1796 1797 hlist_for_each_entry(child, &core->children, child_node) 1798 __clk_recalc_rates(child, update_req, msg); 1799 } 1800 1801 static unsigned long clk_core_get_rate_recalc(struct clk_core *core) 1802 { 1803 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) 1804 __clk_recalc_rates(core, false, 0); 1805 1806 return clk_core_get_rate_nolock(core); 1807 } 1808 1809 /** 1810 * clk_get_rate - return the rate of clk 1811 * @clk: the clk whose rate is being returned 1812 * 1813 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1814 * is set, which means a recalc_rate will be issued. Can be called regardless of 1815 * the clock enabledness. If clk is NULL, or if an error occurred, then returns 1816 * 0. 1817 */ 1818 unsigned long clk_get_rate(struct clk *clk) 1819 { 1820 unsigned long rate; 1821 1822 if (!clk) 1823 return 0; 1824 1825 clk_prepare_lock(); 1826 rate = clk_core_get_rate_recalc(clk->core); 1827 clk_prepare_unlock(); 1828 1829 return rate; 1830 } 1831 EXPORT_SYMBOL_GPL(clk_get_rate); 1832 1833 static int clk_fetch_parent_index(struct clk_core *core, 1834 struct clk_core *parent) 1835 { 1836 int i; 1837 1838 if (!parent) 1839 return -EINVAL; 1840 1841 for (i = 0; i < core->num_parents; i++) { 1842 /* Found it first try! */ 1843 if (core->parents[i].core == parent) 1844 return i; 1845 1846 /* Something else is here, so keep looking */ 1847 if (core->parents[i].core) 1848 continue; 1849 1850 /* Maybe core hasn't been cached but the hw is all we know? 
*/ 1851 if (core->parents[i].hw) { 1852 if (core->parents[i].hw == parent->hw) 1853 break; 1854 1855 /* Didn't match, but we're expecting a clk_hw */ 1856 continue; 1857 } 1858 1859 /* Maybe it hasn't been cached (clk_set_parent() path) */ 1860 if (parent == clk_core_get(core, i)) 1861 break; 1862 1863 /* Fallback to comparing globally unique names */ 1864 if (core->parents[i].name && 1865 !strcmp(parent->name, core->parents[i].name)) 1866 break; 1867 } 1868 1869 if (i == core->num_parents) 1870 return -EINVAL; 1871 1872 core->parents[i].core = parent; 1873 return i; 1874 } 1875 1876 /** 1877 * clk_hw_get_parent_index - return the index of the parent clock 1878 * @hw: clk_hw associated with the clk being consumed 1879 * 1880 * Fetches and returns the index of parent clock. Returns -EINVAL if the given 1881 * clock does not have a current parent. 1882 */ 1883 int clk_hw_get_parent_index(struct clk_hw *hw) 1884 { 1885 struct clk_hw *parent = clk_hw_get_parent(hw); 1886 1887 if (WARN_ON(parent == NULL)) 1888 return -EINVAL; 1889 1890 return clk_fetch_parent_index(hw->core, parent->core); 1891 } 1892 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index); 1893 1894 /* 1895 * Update the orphan status of @core and all its children. 1896 */ 1897 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 1898 { 1899 struct clk_core *child; 1900 1901 core->orphan = is_orphan; 1902 1903 hlist_for_each_entry(child, &core->children, child_node) 1904 clk_core_update_orphan_status(child, is_orphan); 1905 } 1906 1907 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 1908 { 1909 bool was_orphan = core->orphan; 1910 1911 hlist_del(&core->child_node); 1912 1913 if (new_parent) { 1914 bool becomes_orphan = new_parent->orphan; 1915 1916 /* avoid duplicate POST_RATE_CHANGE notifications */ 1917 if (new_parent->new_child == core) 1918 new_parent->new_child = NULL; 1919 1920 hlist_add_head(&core->child_node, &new_parent->children); 1921 1922 if (was_orphan != becomes_orphan) 1923 clk_core_update_orphan_status(core, becomes_orphan); 1924 } else { 1925 hlist_add_head(&core->child_node, &clk_orphan_list); 1926 if (!was_orphan) 1927 clk_core_update_orphan_status(core, true); 1928 } 1929 1930 core->parent = new_parent; 1931 } 1932 1933 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 1934 struct clk_core *parent) 1935 { 1936 unsigned long flags; 1937 struct clk_core *old_parent = core->parent; 1938 1939 /* 1940 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock 1941 * 1942 * 2. Migrate prepare state between parents and prevent race with 1943 * clk_enable(). 1944 * 1945 * If the clock is not prepared, then a race with 1946 * clk_enable/disable() is impossible since we already have the 1947 * prepare lock (future calls to clk_enable() need to be preceded by 1948 * a clk_prepare()). 1949 * 1950 * If the clock is prepared, migrate the prepared state to the new 1951 * parent and also protect against a race with clk_enable() by 1952 * forcing the clock and the new parent on. This ensures that all 1953 * future calls to clk_enable() are practically NOPs with respect to 1954 * hardware and software states. 1955 * 1956 * See also: Comment for clk_set_parent() below. 
1957 */ 1958 1959 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 1960 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1961 clk_core_prepare_enable(old_parent); 1962 clk_core_prepare_enable(parent); 1963 } 1964 1965 /* migrate prepare count if > 0 */ 1966 if (core->prepare_count) { 1967 clk_core_prepare_enable(parent); 1968 clk_core_enable_lock(core); 1969 } 1970 1971 /* update the clk tree topology */ 1972 flags = clk_enable_lock(); 1973 clk_reparent(core, parent); 1974 clk_enable_unlock(flags); 1975 1976 return old_parent; 1977 } 1978 1979 static void __clk_set_parent_after(struct clk_core *core, 1980 struct clk_core *parent, 1981 struct clk_core *old_parent) 1982 { 1983 /* 1984 * Finish the migration of prepare state and undo the changes done 1985 * for preventing a race with clk_enable(). 1986 */ 1987 if (core->prepare_count) { 1988 clk_core_disable_lock(core); 1989 clk_core_disable_unprepare(old_parent); 1990 } 1991 1992 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 1993 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1994 clk_core_disable_unprepare(parent); 1995 clk_core_disable_unprepare(old_parent); 1996 } 1997 } 1998 1999 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 2000 u8 p_index) 2001 { 2002 unsigned long flags; 2003 int ret = 0; 2004 struct clk_core *old_parent; 2005 2006 old_parent = __clk_set_parent_before(core, parent); 2007 2008 trace_clk_set_parent(core, parent); 2009 2010 /* change clock input source */ 2011 if (parent && core->ops->set_parent) 2012 ret = core->ops->set_parent(core->hw, p_index); 2013 2014 trace_clk_set_parent_complete(core, parent); 2015 2016 if (ret) { 2017 flags = clk_enable_lock(); 2018 clk_reparent(core, old_parent); 2019 clk_enable_unlock(flags); 2020 2021 __clk_set_parent_after(core, old_parent, parent); 2022 2023 return ret; 2024 } 2025 2026 __clk_set_parent_after(core, parent, old_parent); 2027 2028 return 0; 2029 } 2030 2031 /** 2032 * __clk_speculate_rates 2033 * @core: first clk in the subtree 2034 * @parent_rate: the "future" rate of clk's parent 2035 * 2036 * Walks the subtree of clks starting with clk, speculating rates as it 2037 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 2038 * 2039 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 2040 * pre-rate change notifications and returns early if no clks in the 2041 * subtree have subscribed to the notifications. Note that if a clk does not 2042 * implement the .recalc_rate callback then it is assumed that the clock will 2043 * take on the rate of its parent. 
2044 */ 2045 static int __clk_speculate_rates(struct clk_core *core, 2046 unsigned long parent_rate) 2047 { 2048 struct clk_core *child; 2049 unsigned long new_rate; 2050 int ret = NOTIFY_DONE; 2051 2052 lockdep_assert_held(&prepare_lock); 2053 2054 new_rate = clk_recalc(core, parent_rate); 2055 2056 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 2057 if (core->notifier_count) 2058 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 2059 2060 if (ret & NOTIFY_STOP_MASK) { 2061 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 2062 __func__, core->name, ret); 2063 goto out; 2064 } 2065 2066 hlist_for_each_entry(child, &core->children, child_node) { 2067 ret = __clk_speculate_rates(child, new_rate); 2068 if (ret & NOTIFY_STOP_MASK) 2069 break; 2070 } 2071 2072 out: 2073 return ret; 2074 } 2075 2076 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 2077 struct clk_core *new_parent, u8 p_index) 2078 { 2079 struct clk_core *child; 2080 2081 core->new_rate = new_rate; 2082 core->new_parent = new_parent; 2083 core->new_parent_index = p_index; 2084 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 2085 core->new_child = NULL; 2086 if (new_parent && new_parent != core->parent) 2087 new_parent->new_child = core; 2088 2089 hlist_for_each_entry(child, &core->children, child_node) { 2090 child->new_rate = clk_recalc(child, new_rate); 2091 clk_calc_subtree(child, child->new_rate, NULL, 0); 2092 } 2093 } 2094 2095 /* 2096 * calculate the new rates returning the topmost clock that has to be 2097 * changed. 2098 */ 2099 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 2100 unsigned long rate) 2101 { 2102 struct clk_core *top = core; 2103 struct clk_core *old_parent, *parent; 2104 unsigned long best_parent_rate = 0; 2105 unsigned long new_rate; 2106 unsigned long min_rate; 2107 unsigned long max_rate; 2108 int p_index = 0; 2109 long ret; 2110 2111 /* sanity */ 2112 if (IS_ERR_OR_NULL(core)) 2113 return NULL; 2114 2115 /* save parent rate, if it exists */ 2116 parent = old_parent = core->parent; 2117 if (parent) 2118 best_parent_rate = parent->rate; 2119 2120 clk_core_get_boundaries(core, &min_rate, &max_rate); 2121 2122 /* find the closest rate and parent clk/rate */ 2123 if (clk_core_can_round(core)) { 2124 struct clk_rate_request req; 2125 2126 clk_core_init_rate_req(core, &req, rate); 2127 2128 ret = clk_core_determine_round_nolock(core, &req); 2129 if (ret < 0) 2130 return NULL; 2131 2132 best_parent_rate = req.best_parent_rate; 2133 new_rate = req.rate; 2134 parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; 2135 2136 if (new_rate < min_rate || new_rate > max_rate) 2137 return NULL; 2138 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 2139 /* pass-through clock without adjustable parent */ 2140 core->new_rate = core->rate; 2141 return NULL; 2142 } else { 2143 /* pass-through clock with adjustable parent */ 2144 top = clk_calc_new_rates(parent, rate); 2145 new_rate = parent->new_rate; 2146 goto out; 2147 } 2148 2149 /* some clocks must be gated to change parent */ 2150 if (parent != old_parent && 2151 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 2152 pr_debug("%s: %s not gated but wants to reparent\n", 2153 __func__, core->name); 2154 return NULL; 2155 } 2156 2157 /* try finding the new parent index */ 2158 if (parent && core->num_parents > 1) { 2159 p_index = clk_fetch_parent_index(core, parent); 2160 if (p_index < 0) { 2161 pr_debug("%s: clk %s can not be parent of clk %s\n", 2162 __func__, parent->name, core->name); 2163 return NULL; 2164 } 2165 } 2166 2167 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 2168 best_parent_rate != parent->rate) 2169 top = clk_calc_new_rates(parent, best_parent_rate); 2170 2171 out: 2172 clk_calc_subtree(core, new_rate, parent, p_index); 2173 2174 return top; 2175 } 2176 2177 /* 2178 * Notify about rate changes in a subtree. Always walk down the whole tree 2179 * so that in case of an error we can walk down the whole tree again and 2180 * abort the change. 2181 */ 2182 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 2183 unsigned long event) 2184 { 2185 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 2186 int ret = NOTIFY_DONE; 2187 2188 if (core->rate == core->new_rate) 2189 return NULL; 2190 2191 if (core->notifier_count) { 2192 ret = __clk_notify(core, event, core->rate, core->new_rate); 2193 if (ret & NOTIFY_STOP_MASK) 2194 fail_clk = core; 2195 } 2196 2197 hlist_for_each_entry(child, &core->children, child_node) { 2198 /* Skip children who will be reparented to another clock */ 2199 if (child->new_parent && child->new_parent != core) 2200 continue; 2201 tmp_clk = clk_propagate_rate_change(child, event); 2202 if (tmp_clk) 2203 fail_clk = tmp_clk; 2204 } 2205 2206 /* handle the new child who might not be in core->children yet */ 2207 if (core->new_child) { 2208 tmp_clk = clk_propagate_rate_change(core->new_child, event); 2209 if (tmp_clk) 2210 fail_clk = tmp_clk; 2211 } 2212 2213 return fail_clk; 2214 } 2215 2216 /* 2217 * walk down a subtree and set the new rates notifying the rate 2218 * change on the way 2219 */ 2220 static void clk_change_rate(struct clk_core *core) 2221 { 2222 struct clk_core *child; 2223 struct hlist_node *tmp; 2224 unsigned long old_rate; 2225 unsigned long best_parent_rate = 0; 2226 bool skip_set_rate = false; 2227 struct clk_core *old_parent; 2228 struct clk_core *parent = NULL; 2229 2230 old_rate = core->rate; 2231 2232 if (core->new_parent) { 2233 parent = core->new_parent; 2234 best_parent_rate = core->new_parent->rate; 2235 } else if (core->parent) { 2236 parent = core->parent; 2237 best_parent_rate = core->parent->rate; 2238 } 2239 2240 if (clk_pm_runtime_get(core)) 2241 return; 2242 2243 if (core->flags & CLK_SET_RATE_UNGATE) { 2244 clk_core_prepare(core); 2245 clk_core_enable_lock(core); 2246 } 2247 2248 if (core->new_parent && core->new_parent != core->parent) { 2249 old_parent = __clk_set_parent_before(core, core->new_parent); 2250 trace_clk_set_parent(core, core->new_parent); 2251 2252 if (core->ops->set_rate_and_parent) { 
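		/*
		 * The provider can switch rate and parent atomically here;
		 * remember that so .set_rate is not called a second time
		 * further down.
		 */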
2253 skip_set_rate = true; 2254 core->ops->set_rate_and_parent(core->hw, core->new_rate, 2255 best_parent_rate, 2256 core->new_parent_index); 2257 } else if (core->ops->set_parent) { 2258 core->ops->set_parent(core->hw, core->new_parent_index); 2259 } 2260 2261 trace_clk_set_parent_complete(core, core->new_parent); 2262 __clk_set_parent_after(core, core->new_parent, old_parent); 2263 } 2264 2265 if (core->flags & CLK_OPS_PARENT_ENABLE) 2266 clk_core_prepare_enable(parent); 2267 2268 trace_clk_set_rate(core, core->new_rate); 2269 2270 if (!skip_set_rate && core->ops->set_rate) 2271 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 2272 2273 trace_clk_set_rate_complete(core, core->new_rate); 2274 2275 core->rate = clk_recalc(core, best_parent_rate); 2276 2277 if (core->flags & CLK_SET_RATE_UNGATE) { 2278 clk_core_disable_lock(core); 2279 clk_core_unprepare(core); 2280 } 2281 2282 if (core->flags & CLK_OPS_PARENT_ENABLE) 2283 clk_core_disable_unprepare(parent); 2284 2285 if (core->notifier_count && old_rate != core->rate) 2286 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 2287 2288 if (core->flags & CLK_RECALC_NEW_RATES) 2289 (void)clk_calc_new_rates(core, core->new_rate); 2290 2291 /* 2292 * Use safe iteration, as change_rate can actually swap parents 2293 * for certain clock types. 2294 */ 2295 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 2296 /* Skip children who will be reparented to another clock */ 2297 if (child->new_parent && child->new_parent != core) 2298 continue; 2299 clk_change_rate(child); 2300 } 2301 2302 /* handle the new child who might not be in core->children yet */ 2303 if (core->new_child) 2304 clk_change_rate(core->new_child); 2305 2306 clk_pm_runtime_put(core); 2307 } 2308 2309 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 2310 unsigned long req_rate) 2311 { 2312 int ret, cnt; 2313 struct clk_rate_request req; 2314 2315 lockdep_assert_held(&prepare_lock); 2316 2317 if (!core) 2318 return 0; 2319 2320 /* simulate what the rate would be if it could be freely set */ 2321 cnt = clk_core_rate_nuke_protect(core); 2322 if (cnt < 0) 2323 return cnt; 2324 2325 clk_core_init_rate_req(core, &req, req_rate); 2326 2327 ret = clk_core_round_rate_nolock(core, &req); 2328 2329 /* restore the protection */ 2330 clk_core_rate_restore_protect(core, cnt); 2331 2332 return ret ? 
0 : req.rate; 2333 } 2334 2335 static int clk_core_set_rate_nolock(struct clk_core *core, 2336 unsigned long req_rate) 2337 { 2338 struct clk_core *top, *fail_clk; 2339 unsigned long rate; 2340 int ret; 2341 2342 if (!core) 2343 return 0; 2344 2345 rate = clk_core_req_round_rate_nolock(core, req_rate); 2346 2347 /* bail early if nothing to do */ 2348 if (rate == clk_core_get_rate_nolock(core)) 2349 return 0; 2350 2351 /* fail on a direct rate set of a protected provider */ 2352 if (clk_core_rate_is_protected(core)) 2353 return -EBUSY; 2354 2355 /* calculate new rates and get the topmost changed clock */ 2356 top = clk_calc_new_rates(core, req_rate); 2357 if (!top) 2358 return -EINVAL; 2359 2360 ret = clk_pm_runtime_get(core); 2361 if (ret) 2362 return ret; 2363 2364 /* notify that we are about to change rates */ 2365 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 2366 if (fail_clk) { 2367 pr_debug("%s: failed to set %s rate\n", __func__, 2368 fail_clk->name); 2369 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 2370 ret = -EBUSY; 2371 goto err; 2372 } 2373 2374 /* change the rates */ 2375 clk_change_rate(top); 2376 2377 core->req_rate = req_rate; 2378 err: 2379 clk_pm_runtime_put(core); 2380 2381 return ret; 2382 } 2383 2384 /** 2385 * clk_set_rate - specify a new rate for clk 2386 * @clk: the clk whose rate is being changed 2387 * @rate: the new rate for clk 2388 * 2389 * In the simplest case clk_set_rate will only adjust the rate of clk. 2390 * 2391 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 2392 * propagate up to clk's parent; whether or not this happens depends on the 2393 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 2394 * after calling .round_rate then upstream parent propagation is ignored. If 2395 * *parent_rate comes back with a new rate for clk's parent then we propagate 2396 * up to clk's parent and set its rate. Upward propagation will continue 2397 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 2398 * .round_rate stops requesting changes to clk's parent_rate. 2399 * 2400 * Rate changes are accomplished via tree traversal that also recalculates the 2401 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 2402 * 2403 * Returns 0 on success, -EERROR otherwise. 2404 */ 2405 int clk_set_rate(struct clk *clk, unsigned long rate) 2406 { 2407 int ret; 2408 2409 if (!clk) 2410 return 0; 2411 2412 /* prevent racing with updates to the clock topology */ 2413 clk_prepare_lock(); 2414 2415 if (clk->exclusive_count) 2416 clk_core_rate_unprotect(clk->core); 2417 2418 ret = clk_core_set_rate_nolock(clk->core, rate); 2419 2420 if (clk->exclusive_count) 2421 clk_core_rate_protect(clk->core); 2422 2423 clk_prepare_unlock(); 2424 2425 return ret; 2426 } 2427 EXPORT_SYMBOL_GPL(clk_set_rate); 2428 2429 /** 2430 * clk_set_rate_exclusive - specify a new rate and get exclusive control 2431 * @clk: the clk whose rate is being changed 2432 * @rate: the new rate for clk 2433 * 2434 * This is a combination of clk_set_rate() and clk_rate_exclusive_get() 2435 * within a critical section 2436 * 2437 * This can be used initially to ensure that at least 1 consumer is 2438 * satisfied when several consumers are competing for exclusivity over the 2439 * same clock provider. 2440 * 2441 * The exclusivity is not applied if setting the rate failed. 2442 * 2443 * Calls to clk_rate_exclusive_get() should be balanced with calls to 2444 * clk_rate_exclusive_put(). 
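 *
 * A sketch of typical consumer usage (illustrative only, "myclk" is a
 * made-up handle):
 *
 *   ret = clk_set_rate_exclusive(myclk, 192000000);
 *   if (ret)
 *           return ret;
 *   ...the rate is now locked against other consumers...
 *   clk_rate_exclusive_put(myclk);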
2445 * 2446 * Returns 0 on success, -EERROR otherwise. 2447 */ 2448 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 2449 { 2450 int ret; 2451 2452 if (!clk) 2453 return 0; 2454 2455 /* prevent racing with updates to the clock topology */ 2456 clk_prepare_lock(); 2457 2458 /* 2459 * The temporary protection removal is not here, on purpose 2460 * This function is meant to be used instead of clk_rate_protect, 2461 * so before the consumer code path protect the clock provider 2462 */ 2463 2464 ret = clk_core_set_rate_nolock(clk->core, rate); 2465 if (!ret) { 2466 clk_core_rate_protect(clk->core); 2467 clk->exclusive_count++; 2468 } 2469 2470 clk_prepare_unlock(); 2471 2472 return ret; 2473 } 2474 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); 2475 2476 static int clk_set_rate_range_nolock(struct clk *clk, 2477 unsigned long min, 2478 unsigned long max) 2479 { 2480 int ret = 0; 2481 unsigned long old_min, old_max, rate; 2482 2483 lockdep_assert_held(&prepare_lock); 2484 2485 if (!clk) 2486 return 0; 2487 2488 trace_clk_set_rate_range(clk->core, min, max); 2489 2490 if (min > max) { 2491 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 2492 __func__, clk->core->name, clk->dev_id, clk->con_id, 2493 min, max); 2494 return -EINVAL; 2495 } 2496 2497 if (clk->exclusive_count) 2498 clk_core_rate_unprotect(clk->core); 2499 2500 /* Save the current values in case we need to rollback the change */ 2501 old_min = clk->min_rate; 2502 old_max = clk->max_rate; 2503 clk->min_rate = min; 2504 clk->max_rate = max; 2505 2506 if (!clk_core_check_boundaries(clk->core, min, max)) { 2507 ret = -EINVAL; 2508 goto out; 2509 } 2510 2511 rate = clk->core->req_rate; 2512 if (clk->core->flags & CLK_GET_RATE_NOCACHE) 2513 rate = clk_core_get_rate_recalc(clk->core); 2514 2515 /* 2516 * Since the boundaries have been changed, let's give the 2517 * opportunity to the provider to adjust the clock rate based on 2518 * the new boundaries. 2519 * 2520 * We also need to handle the case where the clock is currently 2521 * outside of the boundaries. Clamping the last requested rate 2522 * to the current minimum and maximum will also handle this. 2523 * 2524 * FIXME: 2525 * There is a catch. It may fail for the usual reason (clock 2526 * broken, clock protected, etc) but also because: 2527 * - round_rate() was not favorable and fell on the wrong 2528 * side of the boundary 2529 * - the determine_rate() callback does not really check for 2530 * this corner case when determining the rate 2531 */ 2532 rate = clamp(rate, min, max); 2533 ret = clk_core_set_rate_nolock(clk->core, rate); 2534 if (ret) { 2535 /* rollback the changes */ 2536 clk->min_rate = old_min; 2537 clk->max_rate = old_max; 2538 } 2539 2540 out: 2541 if (clk->exclusive_count) 2542 clk_core_rate_protect(clk->core); 2543 2544 return ret; 2545 } 2546 2547 /** 2548 * clk_set_rate_range - set a rate range for a clock source 2549 * @clk: clock source 2550 * @min: desired minimum clock rate in Hz, inclusive 2551 * @max: desired maximum clock rate in Hz, inclusive 2552 * 2553 * Return: 0 for success or negative errno on failure. 
2554 */ 2555 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 2556 { 2557 int ret; 2558 2559 if (!clk) 2560 return 0; 2561 2562 clk_prepare_lock(); 2563 2564 ret = clk_set_rate_range_nolock(clk, min, max); 2565 2566 clk_prepare_unlock(); 2567 2568 return ret; 2569 } 2570 EXPORT_SYMBOL_GPL(clk_set_rate_range); 2571 2572 /** 2573 * clk_set_min_rate - set a minimum clock rate for a clock source 2574 * @clk: clock source 2575 * @rate: desired minimum clock rate in Hz, inclusive 2576 * 2577 * Returns success (0) or negative errno. 2578 */ 2579 int clk_set_min_rate(struct clk *clk, unsigned long rate) 2580 { 2581 if (!clk) 2582 return 0; 2583 2584 trace_clk_set_min_rate(clk->core, rate); 2585 2586 return clk_set_rate_range(clk, rate, clk->max_rate); 2587 } 2588 EXPORT_SYMBOL_GPL(clk_set_min_rate); 2589 2590 /** 2591 * clk_set_max_rate - set a maximum clock rate for a clock source 2592 * @clk: clock source 2593 * @rate: desired maximum clock rate in Hz, inclusive 2594 * 2595 * Returns success (0) or negative errno. 2596 */ 2597 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2598 { 2599 if (!clk) 2600 return 0; 2601 2602 trace_clk_set_max_rate(clk->core, rate); 2603 2604 return clk_set_rate_range(clk, clk->min_rate, rate); 2605 } 2606 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2607 2608 /** 2609 * clk_get_parent - return the parent of a clk 2610 * @clk: the clk whose parent gets returned 2611 * 2612 * Simply returns clk->parent. Returns NULL if clk is NULL. 2613 */ 2614 struct clk *clk_get_parent(struct clk *clk) 2615 { 2616 struct clk *parent; 2617 2618 if (!clk) 2619 return NULL; 2620 2621 clk_prepare_lock(); 2622 /* TODO: Create a per-user clk and change callers to call clk_put */ 2623 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2624 clk_prepare_unlock(); 2625 2626 return parent; 2627 } 2628 EXPORT_SYMBOL_GPL(clk_get_parent); 2629 2630 static struct clk_core *__clk_init_parent(struct clk_core *core) 2631 { 2632 u8 index = 0; 2633 2634 if (core->num_parents > 1 && core->ops->get_parent) 2635 index = core->ops->get_parent(core->hw); 2636 2637 return clk_core_get_parent_by_index(core, index); 2638 } 2639 2640 static void clk_core_reparent(struct clk_core *core, 2641 struct clk_core *new_parent) 2642 { 2643 clk_reparent(core, new_parent); 2644 __clk_recalc_accuracies(core); 2645 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2646 } 2647 2648 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2649 { 2650 if (!hw) 2651 return; 2652 2653 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 2654 } 2655 2656 /** 2657 * clk_has_parent - check if a clock is a possible parent for another 2658 * @clk: clock source 2659 * @parent: parent clock source 2660 * 2661 * This function can be used in drivers that need to check that a clock can be 2662 * the parent of another without actually changing the parent. 2663 * 2664 * Returns true if @parent is a possible parent for @clk, false otherwise. 2665 */ 2666 bool clk_has_parent(const struct clk *clk, const struct clk *parent) 2667 { 2668 /* NULL clocks should be nops, so return success if either is NULL. 
*/ 2669 if (!clk || !parent) 2670 return true; 2671 2672 return clk_core_has_parent(clk->core, parent->core); 2673 } 2674 EXPORT_SYMBOL_GPL(clk_has_parent); 2675 2676 static int clk_core_set_parent_nolock(struct clk_core *core, 2677 struct clk_core *parent) 2678 { 2679 int ret = 0; 2680 int p_index = 0; 2681 unsigned long p_rate = 0; 2682 2683 lockdep_assert_held(&prepare_lock); 2684 2685 if (!core) 2686 return 0; 2687 2688 if (core->parent == parent) 2689 return 0; 2690 2691 /* verify ops for multi-parent clks */ 2692 if (core->num_parents > 1 && !core->ops->set_parent) 2693 return -EPERM; 2694 2695 /* check that we are allowed to re-parent if the clock is in use */ 2696 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2697 return -EBUSY; 2698 2699 if (clk_core_rate_is_protected(core)) 2700 return -EBUSY; 2701 2702 /* try finding the new parent index */ 2703 if (parent) { 2704 p_index = clk_fetch_parent_index(core, parent); 2705 if (p_index < 0) { 2706 pr_debug("%s: clk %s can not be parent of clk %s\n", 2707 __func__, parent->name, core->name); 2708 return p_index; 2709 } 2710 p_rate = parent->rate; 2711 } 2712 2713 ret = clk_pm_runtime_get(core); 2714 if (ret) 2715 return ret; 2716 2717 /* propagate PRE_RATE_CHANGE notifications */ 2718 ret = __clk_speculate_rates(core, p_rate); 2719 2720 /* abort if a driver objects */ 2721 if (ret & NOTIFY_STOP_MASK) 2722 goto runtime_put; 2723 2724 /* do the re-parent */ 2725 ret = __clk_set_parent(core, parent, p_index); 2726 2727 /* propagate rate an accuracy recalculation accordingly */ 2728 if (ret) { 2729 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); 2730 } else { 2731 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2732 __clk_recalc_accuracies(core); 2733 } 2734 2735 runtime_put: 2736 clk_pm_runtime_put(core); 2737 2738 return ret; 2739 } 2740 2741 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) 2742 { 2743 return clk_core_set_parent_nolock(hw->core, parent->core); 2744 } 2745 EXPORT_SYMBOL_GPL(clk_hw_set_parent); 2746 2747 /** 2748 * clk_set_parent - switch the parent of a mux clk 2749 * @clk: the mux clk whose input we are switching 2750 * @parent: the new input to clk 2751 * 2752 * Re-parent clk to use parent as its new input source. If clk is in 2753 * prepared state, the clk will get enabled for the duration of this call. If 2754 * that's not acceptable for a specific clk (Eg: the consumer can't handle 2755 * that, the reparenting is glitchy in hardware, etc), use the 2756 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2757 * 2758 * After successfully changing clk's parent clk_set_parent will update the 2759 * clk topology, sysfs topology and propagate rate recalculation via 2760 * __clk_recalc_rates. 2761 * 2762 * Returns 0 on success, -EERROR otherwise. 2763 */ 2764 int clk_set_parent(struct clk *clk, struct clk *parent) 2765 { 2766 int ret; 2767 2768 if (!clk) 2769 return 0; 2770 2771 clk_prepare_lock(); 2772 2773 if (clk->exclusive_count) 2774 clk_core_rate_unprotect(clk->core); 2775 2776 ret = clk_core_set_parent_nolock(clk->core, 2777 parent ? 
parent->core : NULL); 2778 2779 if (clk->exclusive_count) 2780 clk_core_rate_protect(clk->core); 2781 2782 clk_prepare_unlock(); 2783 2784 return ret; 2785 } 2786 EXPORT_SYMBOL_GPL(clk_set_parent); 2787 2788 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2789 { 2790 int ret = -EINVAL; 2791 2792 lockdep_assert_held(&prepare_lock); 2793 2794 if (!core) 2795 return 0; 2796 2797 if (clk_core_rate_is_protected(core)) 2798 return -EBUSY; 2799 2800 trace_clk_set_phase(core, degrees); 2801 2802 if (core->ops->set_phase) { 2803 ret = core->ops->set_phase(core->hw, degrees); 2804 if (!ret) 2805 core->phase = degrees; 2806 } 2807 2808 trace_clk_set_phase_complete(core, degrees); 2809 2810 return ret; 2811 } 2812 2813 /** 2814 * clk_set_phase - adjust the phase shift of a clock signal 2815 * @clk: clock signal source 2816 * @degrees: number of degrees the signal is shifted 2817 * 2818 * Shifts the phase of a clock signal by the specified 2819 * degrees. Returns 0 on success, -EERROR otherwise. 2820 * 2821 * This function makes no distinction about the input or reference 2822 * signal that we adjust the clock signal phase against. For example 2823 * phase locked-loop clock signal generators we may shift phase with 2824 * respect to feedback clock signal input, but for other cases the 2825 * clock phase may be shifted with respect to some other, unspecified 2826 * signal. 2827 * 2828 * Additionally the concept of phase shift does not propagate through 2829 * the clock tree hierarchy, which sets it apart from clock rates and 2830 * clock accuracy. A parent clock phase attribute does not have an 2831 * impact on the phase attribute of a child clock. 2832 */ 2833 int clk_set_phase(struct clk *clk, int degrees) 2834 { 2835 int ret; 2836 2837 if (!clk) 2838 return 0; 2839 2840 /* sanity check degrees */ 2841 degrees %= 360; 2842 if (degrees < 0) 2843 degrees += 360; 2844 2845 clk_prepare_lock(); 2846 2847 if (clk->exclusive_count) 2848 clk_core_rate_unprotect(clk->core); 2849 2850 ret = clk_core_set_phase_nolock(clk->core, degrees); 2851 2852 if (clk->exclusive_count) 2853 clk_core_rate_protect(clk->core); 2854 2855 clk_prepare_unlock(); 2856 2857 return ret; 2858 } 2859 EXPORT_SYMBOL_GPL(clk_set_phase); 2860 2861 static int clk_core_get_phase(struct clk_core *core) 2862 { 2863 int ret; 2864 2865 lockdep_assert_held(&prepare_lock); 2866 if (!core->ops->get_phase) 2867 return 0; 2868 2869 /* Always try to update cached phase if possible */ 2870 ret = core->ops->get_phase(core->hw); 2871 if (ret >= 0) 2872 core->phase = ret; 2873 2874 return ret; 2875 } 2876 2877 /** 2878 * clk_get_phase - return the phase shift of a clock signal 2879 * @clk: clock signal source 2880 * 2881 * Returns the phase shift of a clock node in degrees, otherwise returns 2882 * -EERROR. 
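 *
 * For example (illustrative), a consumer that previously requested a
 * shift with clk_set_phase(clk, 90) would expect this call to return 90
 * on success.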
2883 */ 2884 int clk_get_phase(struct clk *clk) 2885 { 2886 int ret; 2887 2888 if (!clk) 2889 return 0; 2890 2891 clk_prepare_lock(); 2892 ret = clk_core_get_phase(clk->core); 2893 clk_prepare_unlock(); 2894 2895 return ret; 2896 } 2897 EXPORT_SYMBOL_GPL(clk_get_phase); 2898 2899 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 2900 { 2901 /* Assume a default value of 50% */ 2902 core->duty.num = 1; 2903 core->duty.den = 2; 2904 } 2905 2906 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 2907 2908 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 2909 { 2910 struct clk_duty *duty = &core->duty; 2911 int ret = 0; 2912 2913 if (!core->ops->get_duty_cycle) 2914 return clk_core_update_duty_cycle_parent_nolock(core); 2915 2916 ret = core->ops->get_duty_cycle(core->hw, duty); 2917 if (ret) 2918 goto reset; 2919 2920 /* Don't trust the clock provider too much */ 2921 if (duty->den == 0 || duty->num > duty->den) { 2922 ret = -EINVAL; 2923 goto reset; 2924 } 2925 2926 return 0; 2927 2928 reset: 2929 clk_core_reset_duty_cycle_nolock(core); 2930 return ret; 2931 } 2932 2933 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 2934 { 2935 int ret = 0; 2936 2937 if (core->parent && 2938 core->flags & CLK_DUTY_CYCLE_PARENT) { 2939 ret = clk_core_update_duty_cycle_nolock(core->parent); 2940 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2941 } else { 2942 clk_core_reset_duty_cycle_nolock(core); 2943 } 2944 2945 return ret; 2946 } 2947 2948 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2949 struct clk_duty *duty); 2950 2951 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 2952 struct clk_duty *duty) 2953 { 2954 int ret; 2955 2956 lockdep_assert_held(&prepare_lock); 2957 2958 if (clk_core_rate_is_protected(core)) 2959 return -EBUSY; 2960 2961 trace_clk_set_duty_cycle(core, duty); 2962 2963 if (!core->ops->set_duty_cycle) 2964 return clk_core_set_duty_cycle_parent_nolock(core, duty); 2965 2966 ret = core->ops->set_duty_cycle(core->hw, duty); 2967 if (!ret) 2968 memcpy(&core->duty, duty, sizeof(*duty)); 2969 2970 trace_clk_set_duty_cycle_complete(core, duty); 2971 2972 return ret; 2973 } 2974 2975 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2976 struct clk_duty *duty) 2977 { 2978 int ret = 0; 2979 2980 if (core->parent && 2981 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 2982 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 2983 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2984 } 2985 2986 return ret; 2987 } 2988 2989 /** 2990 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 2991 * @clk: clock signal source 2992 * @num: numerator of the duty cycle ratio to be applied 2993 * @den: denominator of the duty cycle ratio to be applied 2994 * 2995 * Apply the duty cycle ratio if the ratio is valid and the clock can 2996 * perform this operation 2997 * 2998 * Returns (0) on success, a negative errno otherwise. 
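 *
 * For example (illustrative values), a 25% duty cycle is requested with:
 *
 *   ret = clk_set_duty_cycle(clk, 1, 4);
 *
 * and can later be read back as a percentage with
 * clk_get_scaled_duty_cycle(clk, 100).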
2999 */ 3000 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 3001 { 3002 int ret; 3003 struct clk_duty duty; 3004 3005 if (!clk) 3006 return 0; 3007 3008 /* sanity check the ratio */ 3009 if (den == 0 || num > den) 3010 return -EINVAL; 3011 3012 duty.num = num; 3013 duty.den = den; 3014 3015 clk_prepare_lock(); 3016 3017 if (clk->exclusive_count) 3018 clk_core_rate_unprotect(clk->core); 3019 3020 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 3021 3022 if (clk->exclusive_count) 3023 clk_core_rate_protect(clk->core); 3024 3025 clk_prepare_unlock(); 3026 3027 return ret; 3028 } 3029 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 3030 3031 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 3032 unsigned int scale) 3033 { 3034 struct clk_duty *duty = &core->duty; 3035 int ret; 3036 3037 clk_prepare_lock(); 3038 3039 ret = clk_core_update_duty_cycle_nolock(core); 3040 if (!ret) 3041 ret = mult_frac(scale, duty->num, duty->den); 3042 3043 clk_prepare_unlock(); 3044 3045 return ret; 3046 } 3047 3048 /** 3049 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 3050 * @clk: clock signal source 3051 * @scale: scaling factor to be applied to represent the ratio as an integer 3052 * 3053 * Returns the duty cycle ratio of a clock node multiplied by the provided 3054 * scaling factor, or negative errno on error. 3055 */ 3056 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 3057 { 3058 if (!clk) 3059 return 0; 3060 3061 return clk_core_get_scaled_duty_cycle(clk->core, scale); 3062 } 3063 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 3064 3065 /** 3066 * clk_is_match - check if two clk's point to the same hardware clock 3067 * @p: clk compared against q 3068 * @q: clk compared against p 3069 * 3070 * Returns true if the two struct clk pointers both point to the same hardware 3071 * clock node. Put differently, returns true if struct clk *p and struct clk *q 3072 * share the same struct clk_core object. 3073 * 3074 * Returns false otherwise. Note that two NULL clks are treated as matching. 3075 */ 3076 bool clk_is_match(const struct clk *p, const struct clk *q) 3077 { 3078 /* trivial case: identical struct clk's or both NULL */ 3079 if (p == q) 3080 return true; 3081 3082 /* true if clk->core pointers match. Avoid dereferencing garbage */ 3083 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 3084 if (p->core == q->core) 3085 return true; 3086 3087 return false; 3088 } 3089 EXPORT_SYMBOL_GPL(clk_is_match); 3090 3091 /*** debugfs support ***/ 3092 3093 #ifdef CONFIG_DEBUG_FS 3094 #include <linux/debugfs.h> 3095 3096 static struct dentry *rootdir; 3097 static int inited = 0; 3098 static DEFINE_MUTEX(clk_debug_lock); 3099 static HLIST_HEAD(clk_debug_list); 3100 3101 static struct hlist_head *orphan_list[] = { 3102 &clk_orphan_list, 3103 NULL, 3104 }; 3105 3106 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 3107 int level) 3108 { 3109 int phase; 3110 3111 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", 3112 level * 3 + 1, "", 3113 30 - level * 3, c->name, 3114 c->enable_count, c->prepare_count, c->protect_count, 3115 clk_core_get_rate_recalc(c), 3116 clk_core_get_accuracy_recalc(c)); 3117 3118 phase = clk_core_get_phase(c); 3119 if (phase >= 0) 3120 seq_printf(s, "%5d", phase); 3121 else 3122 seq_puts(s, "-----"); 3123 3124 seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000)); 3125 3126 if (c->ops->is_enabled) 3127 seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 
'Y' : 'N'); 3128 else if (!c->ops->enable) 3129 seq_printf(s, " %9c\n", 'Y'); 3130 else 3131 seq_printf(s, " %9c\n", '?'); 3132 } 3133 3134 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 3135 int level) 3136 { 3137 struct clk_core *child; 3138 3139 clk_pm_runtime_get(c); 3140 clk_summary_show_one(s, c, level); 3141 clk_pm_runtime_put(c); 3142 3143 hlist_for_each_entry(child, &c->children, child_node) 3144 clk_summary_show_subtree(s, child, level + 1); 3145 } 3146 3147 static int clk_summary_show(struct seq_file *s, void *data) 3148 { 3149 struct clk_core *c; 3150 struct hlist_head **lists = (struct hlist_head **)s->private; 3151 3152 seq_puts(s, " enable prepare protect duty hardware\n"); 3153 seq_puts(s, " clock count count count rate accuracy phase cycle enable\n"); 3154 seq_puts(s, "-------------------------------------------------------------------------------------------------------\n"); 3155 3156 clk_prepare_lock(); 3157 3158 for (; *lists; lists++) 3159 hlist_for_each_entry(c, *lists, child_node) 3160 clk_summary_show_subtree(s, c, 0); 3161 3162 clk_prepare_unlock(); 3163 3164 return 0; 3165 } 3166 DEFINE_SHOW_ATTRIBUTE(clk_summary); 3167 3168 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 3169 { 3170 int phase; 3171 unsigned long min_rate, max_rate; 3172 3173 clk_core_get_boundaries(c, &min_rate, &max_rate); 3174 3175 /* This should be JSON format, i.e. elements separated with a comma */ 3176 seq_printf(s, "\"%s\": { ", c->name); 3177 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 3178 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 3179 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 3180 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); 3181 seq_printf(s, "\"min_rate\": %lu,", min_rate); 3182 seq_printf(s, "\"max_rate\": %lu,", max_rate); 3183 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); 3184 phase = clk_core_get_phase(c); 3185 if (phase >= 0) 3186 seq_printf(s, "\"phase\": %d,", phase); 3187 seq_printf(s, "\"duty_cycle\": %u", 3188 clk_core_get_scaled_duty_cycle(c, 100000)); 3189 } 3190 3191 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 3192 { 3193 struct clk_core *child; 3194 3195 clk_dump_one(s, c, level); 3196 3197 hlist_for_each_entry(child, &c->children, child_node) { 3198 seq_putc(s, ','); 3199 clk_dump_subtree(s, child, level + 1); 3200 } 3201 3202 seq_putc(s, '}'); 3203 } 3204 3205 static int clk_dump_show(struct seq_file *s, void *data) 3206 { 3207 struct clk_core *c; 3208 bool first_node = true; 3209 struct hlist_head **lists = (struct hlist_head **)s->private; 3210 3211 seq_putc(s, '{'); 3212 clk_prepare_lock(); 3213 3214 for (; *lists; lists++) { 3215 hlist_for_each_entry(c, *lists, child_node) { 3216 if (!first_node) 3217 seq_putc(s, ','); 3218 first_node = false; 3219 clk_dump_subtree(s, c, 0); 3220 } 3221 } 3222 3223 clk_prepare_unlock(); 3224 3225 seq_puts(s, "}\n"); 3226 return 0; 3227 } 3228 DEFINE_SHOW_ATTRIBUTE(clk_dump); 3229 3230 #undef CLOCK_ALLOW_WRITE_DEBUGFS 3231 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3232 /* 3233 * This can be dangerous, therefore don't provide any real compile time 3234 * configuration option for this feature. 3235 * People who want to use this will need to modify the source code directly. 
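 *
 * Concretely, that means flipping the #undef above to a #define in a
 * local, never-to-be-merged change. The clk_rate, clk_parent and
 * clk_prepare_enable files then become writable, e.g. (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/clk/<clkname>/clk_prepare_enable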
3236 */ 3237 static int clk_rate_set(void *data, u64 val) 3238 { 3239 struct clk_core *core = data; 3240 int ret; 3241 3242 clk_prepare_lock(); 3243 ret = clk_core_set_rate_nolock(core, val); 3244 clk_prepare_unlock(); 3245 3246 return ret; 3247 } 3248 3249 #define clk_rate_mode 0644 3250 3251 static int clk_prepare_enable_set(void *data, u64 val) 3252 { 3253 struct clk_core *core = data; 3254 int ret = 0; 3255 3256 if (val) 3257 ret = clk_prepare_enable(core->hw->clk); 3258 else 3259 clk_disable_unprepare(core->hw->clk); 3260 3261 return ret; 3262 } 3263 3264 static int clk_prepare_enable_get(void *data, u64 *val) 3265 { 3266 struct clk_core *core = data; 3267 3268 *val = core->enable_count && core->prepare_count; 3269 return 0; 3270 } 3271 3272 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get, 3273 clk_prepare_enable_set, "%llu\n"); 3274 3275 #else 3276 #define clk_rate_set NULL 3277 #define clk_rate_mode 0444 3278 #endif 3279 3280 static int clk_rate_get(void *data, u64 *val) 3281 { 3282 struct clk_core *core = data; 3283 3284 clk_prepare_lock(); 3285 *val = clk_core_get_rate_recalc(core); 3286 clk_prepare_unlock(); 3287 3288 return 0; 3289 } 3290 3291 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n"); 3292 3293 static const struct { 3294 unsigned long flag; 3295 const char *name; 3296 } clk_flags[] = { 3297 #define ENTRY(f) { f, #f } 3298 ENTRY(CLK_SET_RATE_GATE), 3299 ENTRY(CLK_SET_PARENT_GATE), 3300 ENTRY(CLK_SET_RATE_PARENT), 3301 ENTRY(CLK_IGNORE_UNUSED), 3302 ENTRY(CLK_GET_RATE_NOCACHE), 3303 ENTRY(CLK_SET_RATE_NO_REPARENT), 3304 ENTRY(CLK_GET_ACCURACY_NOCACHE), 3305 ENTRY(CLK_RECALC_NEW_RATES), 3306 ENTRY(CLK_SET_RATE_UNGATE), 3307 ENTRY(CLK_IS_CRITICAL), 3308 ENTRY(CLK_OPS_PARENT_ENABLE), 3309 ENTRY(CLK_DUTY_CYCLE_PARENT), 3310 #undef ENTRY 3311 }; 3312 3313 static int clk_flags_show(struct seq_file *s, void *data) 3314 { 3315 struct clk_core *core = s->private; 3316 unsigned long flags = core->flags; 3317 unsigned int i; 3318 3319 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 3320 if (flags & clk_flags[i].flag) { 3321 seq_printf(s, "%s\n", clk_flags[i].name); 3322 flags &= ~clk_flags[i].flag; 3323 } 3324 } 3325 if (flags) { 3326 /* Unknown flags */ 3327 seq_printf(s, "0x%lx\n", flags); 3328 } 3329 3330 return 0; 3331 } 3332 DEFINE_SHOW_ATTRIBUTE(clk_flags); 3333 3334 static void possible_parent_show(struct seq_file *s, struct clk_core *core, 3335 unsigned int i, char terminator) 3336 { 3337 struct clk_core *parent; 3338 3339 /* 3340 * Go through the following options to fetch a parent's name. 3341 * 3342 * 1. Fetch the registered parent clock and use its name 3343 * 2. Use the global (fallback) name if specified 3344 * 3. Use the local fw_name if provided 3345 * 4. Fetch parent clock's clock-output-name if DT index was set 3346 * 3347 * This may still fail in some cases, such as when the parent is 3348 * specified directly via a struct clk_hw pointer, but it isn't 3349 * registered (yet). 
3350 */ 3351 parent = clk_core_get_parent_by_index(core, i); 3352 if (parent) 3353 seq_puts(s, parent->name); 3354 else if (core->parents[i].name) 3355 seq_puts(s, core->parents[i].name); 3356 else if (core->parents[i].fw_name) 3357 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); 3358 else if (core->parents[i].index >= 0) 3359 seq_puts(s, 3360 of_clk_get_parent_name(core->of_node, 3361 core->parents[i].index)); 3362 else 3363 seq_puts(s, "(missing)"); 3364 3365 seq_putc(s, terminator); 3366 } 3367 3368 static int possible_parents_show(struct seq_file *s, void *data) 3369 { 3370 struct clk_core *core = s->private; 3371 int i; 3372 3373 for (i = 0; i < core->num_parents - 1; i++) 3374 possible_parent_show(s, core, i, ' '); 3375 3376 possible_parent_show(s, core, i, '\n'); 3377 3378 return 0; 3379 } 3380 DEFINE_SHOW_ATTRIBUTE(possible_parents); 3381 3382 static int current_parent_show(struct seq_file *s, void *data) 3383 { 3384 struct clk_core *core = s->private; 3385 3386 if (core->parent) 3387 seq_printf(s, "%s\n", core->parent->name); 3388 3389 return 0; 3390 } 3391 DEFINE_SHOW_ATTRIBUTE(current_parent); 3392 3393 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3394 static ssize_t current_parent_write(struct file *file, const char __user *ubuf, 3395 size_t count, loff_t *ppos) 3396 { 3397 struct seq_file *s = file->private_data; 3398 struct clk_core *core = s->private; 3399 struct clk_core *parent; 3400 u8 idx; 3401 int err; 3402 3403 err = kstrtou8_from_user(ubuf, count, 0, &idx); 3404 if (err < 0) 3405 return err; 3406 3407 parent = clk_core_get_parent_by_index(core, idx); 3408 if (!parent) 3409 return -ENOENT; 3410 3411 clk_prepare_lock(); 3412 err = clk_core_set_parent_nolock(core, parent); 3413 clk_prepare_unlock(); 3414 if (err) 3415 return err; 3416 3417 return count; 3418 } 3419 3420 static const struct file_operations current_parent_rw_fops = { 3421 .open = current_parent_open, 3422 .write = current_parent_write, 3423 .read = seq_read, 3424 .llseek = seq_lseek, 3425 .release = single_release, 3426 }; 3427 #endif 3428 3429 static int clk_duty_cycle_show(struct seq_file *s, void *data) 3430 { 3431 struct clk_core *core = s->private; 3432 struct clk_duty *duty = &core->duty; 3433 3434 seq_printf(s, "%u/%u\n", duty->num, duty->den); 3435 3436 return 0; 3437 } 3438 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 3439 3440 static int clk_min_rate_show(struct seq_file *s, void *data) 3441 { 3442 struct clk_core *core = s->private; 3443 unsigned long min_rate, max_rate; 3444 3445 clk_prepare_lock(); 3446 clk_core_get_boundaries(core, &min_rate, &max_rate); 3447 clk_prepare_unlock(); 3448 seq_printf(s, "%lu\n", min_rate); 3449 3450 return 0; 3451 } 3452 DEFINE_SHOW_ATTRIBUTE(clk_min_rate); 3453 3454 static int clk_max_rate_show(struct seq_file *s, void *data) 3455 { 3456 struct clk_core *core = s->private; 3457 unsigned long min_rate, max_rate; 3458 3459 clk_prepare_lock(); 3460 clk_core_get_boundaries(core, &min_rate, &max_rate); 3461 clk_prepare_unlock(); 3462 seq_printf(s, "%lu\n", max_rate); 3463 3464 return 0; 3465 } 3466 DEFINE_SHOW_ATTRIBUTE(clk_max_rate); 3467 3468 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 3469 { 3470 struct dentry *root; 3471 3472 if (!core || !pdentry) 3473 return; 3474 3475 root = debugfs_create_dir(core->name, pdentry); 3476 core->dentry = root; 3477 3478 debugfs_create_file("clk_rate", clk_rate_mode, root, core, 3479 &clk_rate_fops); 3480 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); 3481 
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); 3482 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 3483 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 3484 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 3485 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 3486 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 3487 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 3488 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 3489 debugfs_create_file("clk_duty_cycle", 0444, root, core, 3490 &clk_duty_cycle_fops); 3491 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3492 debugfs_create_file("clk_prepare_enable", 0644, root, core, 3493 &clk_prepare_enable_fops); 3494 3495 if (core->num_parents > 1) 3496 debugfs_create_file("clk_parent", 0644, root, core, 3497 ¤t_parent_rw_fops); 3498 else 3499 #endif 3500 if (core->num_parents > 0) 3501 debugfs_create_file("clk_parent", 0444, root, core, 3502 ¤t_parent_fops); 3503 3504 if (core->num_parents > 1) 3505 debugfs_create_file("clk_possible_parents", 0444, root, core, 3506 &possible_parents_fops); 3507 3508 if (core->ops->debug_init) 3509 core->ops->debug_init(core->hw, core->dentry); 3510 } 3511 3512 /** 3513 * clk_debug_register - add a clk node to the debugfs clk directory 3514 * @core: the clk being added to the debugfs clk directory 3515 * 3516 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 3517 * initialized. Otherwise it bails out early since the debugfs clk directory 3518 * will be created lazily by clk_debug_init as part of a late_initcall. 3519 */ 3520 static void clk_debug_register(struct clk_core *core) 3521 { 3522 mutex_lock(&clk_debug_lock); 3523 hlist_add_head(&core->debug_node, &clk_debug_list); 3524 if (inited) 3525 clk_debug_create_one(core, rootdir); 3526 mutex_unlock(&clk_debug_lock); 3527 } 3528 3529 /** 3530 * clk_debug_unregister - remove a clk node from the debugfs clk directory 3531 * @core: the clk being removed from the debugfs clk directory 3532 * 3533 * Dynamically removes a clk and all its child nodes from the 3534 * debugfs clk directory if clk->dentry points to debugfs created by 3535 * clk_debug_register in __clk_core_init. 3536 */ 3537 static void clk_debug_unregister(struct clk_core *core) 3538 { 3539 mutex_lock(&clk_debug_lock); 3540 hlist_del_init(&core->debug_node); 3541 debugfs_remove_recursive(core->dentry); 3542 core->dentry = NULL; 3543 mutex_unlock(&clk_debug_lock); 3544 } 3545 3546 /** 3547 * clk_debug_init - lazily populate the debugfs clk directory 3548 * 3549 * clks are often initialized very early during boot before memory can be 3550 * dynamically allocated and well before debugfs is setup. This function 3551 * populates the debugfs clk directory once at boot-time when we know that 3552 * debugfs is setup. It should only be called once at boot-time, all other clks 3553 * added dynamically will be done so with clk_debug_register. 
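 *
 * Once populated, the tree can be inspected from userspace, e.g.
 * (assuming debugfs is mounted at the usual /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/clk/clk_summary
 *   cat /sys/kernel/debug/clk/clk_orphan_summary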
3554 */ 3555 static int __init clk_debug_init(void) 3556 { 3557 struct clk_core *core; 3558 3559 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3560 pr_warn("\n"); 3561 pr_warn("********************************************************************\n"); 3562 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3563 pr_warn("** **\n"); 3564 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); 3565 pr_warn("** **\n"); 3566 pr_warn("** This means that this kernel is built to expose clk operations **\n"); 3567 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); 3568 pr_warn("** to userspace, which may compromise security on your system. **\n"); 3569 pr_warn("** **\n"); 3570 pr_warn("** If you see this message and you are not debugging the **\n"); 3571 pr_warn("** kernel, report this immediately to your vendor! **\n"); 3572 pr_warn("** **\n"); 3573 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3574 pr_warn("********************************************************************\n"); 3575 #endif 3576 3577 rootdir = debugfs_create_dir("clk", NULL); 3578 3579 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 3580 &clk_summary_fops); 3581 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 3582 &clk_dump_fops); 3583 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 3584 &clk_summary_fops); 3585 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 3586 &clk_dump_fops); 3587 3588 mutex_lock(&clk_debug_lock); 3589 hlist_for_each_entry(core, &clk_debug_list, debug_node) 3590 clk_debug_create_one(core, rootdir); 3591 3592 inited = 1; 3593 mutex_unlock(&clk_debug_lock); 3594 3595 return 0; 3596 } 3597 late_initcall(clk_debug_init); 3598 #else 3599 static inline void clk_debug_register(struct clk_core *core) { } 3600 static inline void clk_debug_unregister(struct clk_core *core) 3601 { 3602 } 3603 #endif 3604 3605 static void clk_core_reparent_orphans_nolock(void) 3606 { 3607 struct clk_core *orphan; 3608 struct hlist_node *tmp2; 3609 3610 /* 3611 * walk the list of orphan clocks and reparent any that newly finds a 3612 * parent. 3613 */ 3614 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3615 struct clk_core *parent = __clk_init_parent(orphan); 3616 3617 /* 3618 * We need to use __clk_set_parent_before() and _after() to 3619 * properly migrate any prepare/enable count of the orphan 3620 * clock. This is important for CLK_IS_CRITICAL clocks, which 3621 * are enabled during init but might not have a parent yet. 3622 */ 3623 if (parent) { 3624 /* update the clk tree topology */ 3625 __clk_set_parent_before(orphan, parent); 3626 __clk_set_parent_after(orphan, parent, NULL); 3627 __clk_recalc_accuracies(orphan); 3628 __clk_recalc_rates(orphan, true, 0); 3629 3630 /* 3631 * __clk_init_parent() will set the initial req_rate to 3632 * 0 if the clock doesn't have clk_ops::recalc_rate and 3633 * is an orphan when it's registered. 3634 * 3635 * 'req_rate' is used by clk_set_rate_range() and 3636 * clk_put() to trigger a clk_set_rate() call whenever 3637 * the boundaries are modified. Let's make sure 3638 * 'req_rate' is set to something non-zero so that 3639 * clk_set_rate_range() doesn't drop the frequency. 
3640 */
3641 orphan->req_rate = orphan->rate;
3642 }
3643 }
3644 }
3645
3646 /**
3647 * __clk_core_init - initialize the data structures in a struct clk_core
3648 * @core: clk_core being initialized
3649 *
3650 * Initializes the lists in struct clk_core, queries the hardware for the
3651 * parent and rate and sets them both.
3652 */
3653 static int __clk_core_init(struct clk_core *core)
3654 {
3655 int ret;
3656 struct clk_core *parent;
3657 unsigned long rate;
3658 int phase;
3659
3660 clk_prepare_lock();
3661
3662 /*
3663 * Set hw->core after grabbing the prepare_lock to synchronize with
3664 * callers of clk_core_fill_parent_index() where we treat hw->core
3665 * being NULL as the clk not being registered yet. This is crucial so
3666 * that clks aren't parented until their parent is fully registered.
3667 */
3668 core->hw->core = core;
3669
3670 ret = clk_pm_runtime_get(core);
3671 if (ret)
3672 goto unlock;
3673
3674 /* check to see if a clock with this name is already registered */
3675 if (clk_core_lookup(core->name)) {
3676 pr_debug("%s: clk %s already initialized\n",
3677 __func__, core->name);
3678 ret = -EEXIST;
3679 goto out;
3680 }
3681
3682 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3683 if (core->ops->set_rate &&
3684 !((core->ops->round_rate || core->ops->determine_rate) &&
3685 core->ops->recalc_rate)) {
3686 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3687 __func__, core->name);
3688 ret = -EINVAL;
3689 goto out;
3690 }
3691
3692 if (core->ops->set_parent && !core->ops->get_parent) {
3693 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3694 __func__, core->name);
3695 ret = -EINVAL;
3696 goto out;
3697 }
3698
3699 if (core->num_parents > 1 && !core->ops->get_parent) {
3700 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3701 __func__, core->name);
3702 ret = -EINVAL;
3703 goto out;
3704 }
3705
3706 if (core->ops->set_rate_and_parent &&
3707 !(core->ops->set_parent && core->ops->set_rate)) {
3708 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3709 __func__, core->name);
3710 ret = -EINVAL;
3711 goto out;
3712 }
3713
3714 /*
3715 * optional platform-specific magic
3716 *
3717 * The .init callback is not used by any of the basic clock types, but
3718 * exists for weird hardware that must perform initialization magic for
3719 * CCF to get an accurate view of the clock for any other callbacks. It may
3720 * also be used to perform dynamic allocations. Such allocations
3721 * must be freed in the terminate() callback.
3722 * This callback shall not be used to initialize the clock's parameters,
3723 * such as rate, parent, etc ...
3724 *
3725 * If it exists, this callback should be called before any other callback of
3726 * the clock.
3727 */
3728 if (core->ops->init) {
3729 ret = core->ops->init(core->hw);
3730 if (ret)
3731 goto out;
3732 }
3733
3734 parent = core->parent = __clk_init_parent(core);
3735
3736 /*
3737 * Populate core->parent if parent has already been clk_core_init'd. If
3738 * parent has not yet been clk_core_init'd then place clk in the orphan
3739 * list. If clk doesn't have any parents then place it in the root
3740 * clk list.
3741 *
3742 * Every time a new clk is clk_init'd then we walk the list of orphan
3743 * clocks and re-parent any that are children of the clock currently
3744 * being clk_init'd.
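 *
 * As a purely illustrative example, if a leaf gate is registered before
 * the PLL that feeds it, the gate sits on clk_orphan_list; once the PLL
 * registers, clk_core_reparent_orphans_nolock() adopts the gate and moves
 * it under the PLL.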
3745 */ 3746 if (parent) { 3747 hlist_add_head(&core->child_node, &parent->children); 3748 core->orphan = parent->orphan; 3749 } else if (!core->num_parents) { 3750 hlist_add_head(&core->child_node, &clk_root_list); 3751 core->orphan = false; 3752 } else { 3753 hlist_add_head(&core->child_node, &clk_orphan_list); 3754 core->orphan = true; 3755 } 3756 3757 /* 3758 * Set clk's accuracy. The preferred method is to use 3759 * .recalc_accuracy. For simple clocks and lazy developers the default 3760 * fallback is to use the parent's accuracy. If a clock doesn't have a 3761 * parent (or is orphaned) then accuracy is set to zero (perfect 3762 * clock). 3763 */ 3764 if (core->ops->recalc_accuracy) 3765 core->accuracy = core->ops->recalc_accuracy(core->hw, 3766 clk_core_get_accuracy_no_lock(parent)); 3767 else if (parent) 3768 core->accuracy = parent->accuracy; 3769 else 3770 core->accuracy = 0; 3771 3772 /* 3773 * Set clk's phase by clk_core_get_phase() caching the phase. 3774 * Since a phase is by definition relative to its parent, just 3775 * query the current clock phase, or just assume it's in phase. 3776 */ 3777 phase = clk_core_get_phase(core); 3778 if (phase < 0) { 3779 ret = phase; 3780 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, 3781 core->name); 3782 goto out; 3783 } 3784 3785 /* 3786 * Set clk's duty cycle. 3787 */ 3788 clk_core_update_duty_cycle_nolock(core); 3789 3790 /* 3791 * Set clk's rate. The preferred method is to use .recalc_rate. For 3792 * simple clocks and lazy developers the default fallback is to use the 3793 * parent's rate. If a clock doesn't have a parent (or is orphaned) 3794 * then rate is set to zero. 3795 */ 3796 if (core->ops->recalc_rate) 3797 rate = core->ops->recalc_rate(core->hw, 3798 clk_core_get_rate_nolock(parent)); 3799 else if (parent) 3800 rate = parent->rate; 3801 else 3802 rate = 0; 3803 core->rate = core->req_rate = rate; 3804 3805 /* 3806 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 3807 * don't get accidentally disabled when walking the orphan tree and 3808 * reparenting clocks 3809 */ 3810 if (core->flags & CLK_IS_CRITICAL) { 3811 ret = clk_core_prepare(core); 3812 if (ret) { 3813 pr_warn("%s: critical clk '%s' failed to prepare\n", 3814 __func__, core->name); 3815 goto out; 3816 } 3817 3818 ret = clk_core_enable_lock(core); 3819 if (ret) { 3820 pr_warn("%s: critical clk '%s' failed to enable\n", 3821 __func__, core->name); 3822 clk_core_unprepare(core); 3823 goto out; 3824 } 3825 } 3826 3827 clk_core_reparent_orphans_nolock(); 3828 3829 kref_init(&core->ref); 3830 out: 3831 clk_pm_runtime_put(core); 3832 unlock: 3833 if (ret) { 3834 hlist_del_init(&core->child_node); 3835 core->hw->core = NULL; 3836 } 3837 3838 clk_prepare_unlock(); 3839 3840 if (!ret) 3841 clk_debug_register(core); 3842 3843 return ret; 3844 } 3845 3846 /** 3847 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core 3848 * @core: clk to add consumer to 3849 * @clk: consumer to link to a clk 3850 */ 3851 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) 3852 { 3853 clk_prepare_lock(); 3854 hlist_add_head(&clk->clks_node, &core->clks); 3855 clk_prepare_unlock(); 3856 } 3857 3858 /** 3859 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core 3860 * @clk: consumer to unlink 3861 */ 3862 static void clk_core_unlink_consumer(struct clk *clk) 3863 { 3864 lockdep_assert_held(&prepare_lock); 3865 hlist_del(&clk->clks_node); 3866 } 3867 3868 /** 3869 * alloc_clk - 
Allocate a clk consumer, but leave it unlinked to the clk_core 3870 * @core: clk to allocate a consumer for 3871 * @dev_id: string describing device name 3872 * @con_id: connection ID string on device 3873 * 3874 * Returns: clk consumer left unlinked from the consumer list 3875 */ 3876 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, 3877 const char *con_id) 3878 { 3879 struct clk *clk; 3880 3881 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 3882 if (!clk) 3883 return ERR_PTR(-ENOMEM); 3884 3885 clk->core = core; 3886 clk->dev_id = dev_id; 3887 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 3888 clk->max_rate = ULONG_MAX; 3889 3890 return clk; 3891 } 3892 3893 /** 3894 * free_clk - Free a clk consumer 3895 * @clk: clk consumer to free 3896 * 3897 * Note, this assumes the clk has been unlinked from the clk_core consumer 3898 * list. 3899 */ 3900 static void free_clk(struct clk *clk) 3901 { 3902 kfree_const(clk->con_id); 3903 kfree(clk); 3904 } 3905 3906 /** 3907 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given 3908 * a clk_hw 3909 * @dev: clk consumer device 3910 * @hw: clk_hw associated with the clk being consumed 3911 * @dev_id: string describing device name 3912 * @con_id: connection ID string on device 3913 * 3914 * This is the main function used to create a clk pointer for use by clk 3915 * consumers. It connects a consumer to the clk_core and clk_hw structures 3916 * used by the framework and clk provider respectively. 3917 */ 3918 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, 3919 const char *dev_id, const char *con_id) 3920 { 3921 struct clk *clk; 3922 struct clk_core *core; 3923 3924 /* This is to allow this function to be chained to others */ 3925 if (IS_ERR_OR_NULL(hw)) 3926 return ERR_CAST(hw); 3927 3928 core = hw->core; 3929 clk = alloc_clk(core, dev_id, con_id); 3930 if (IS_ERR(clk)) 3931 return clk; 3932 clk->dev = dev; 3933 3934 if (!try_module_get(core->owner)) { 3935 free_clk(clk); 3936 return ERR_PTR(-ENOENT); 3937 } 3938 3939 kref_get(&core->ref); 3940 clk_core_link_consumer(core, clk); 3941 3942 return clk; 3943 } 3944 3945 /** 3946 * clk_hw_get_clk - get clk consumer given an clk_hw 3947 * @hw: clk_hw associated with the clk being consumed 3948 * @con_id: connection ID string on device 3949 * 3950 * Returns: new clk consumer 3951 * This is the function to be used by providers which need 3952 * to get a consumer clk and act on the clock element 3953 * Calls to this function must be balanced with calls clk_put() 3954 */ 3955 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) 3956 { 3957 struct device *dev = hw->core->dev; 3958 const char *name = dev ? 
dev_name(dev) : NULL; 3959 3960 return clk_hw_create_clk(dev, hw, name, con_id); 3961 } 3962 EXPORT_SYMBOL(clk_hw_get_clk); 3963 3964 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) 3965 { 3966 const char *dst; 3967 3968 if (!src) { 3969 if (must_exist) 3970 return -EINVAL; 3971 return 0; 3972 } 3973 3974 *dst_p = dst = kstrdup_const(src, GFP_KERNEL); 3975 if (!dst) 3976 return -ENOMEM; 3977 3978 return 0; 3979 } 3980 3981 static int clk_core_populate_parent_map(struct clk_core *core, 3982 const struct clk_init_data *init) 3983 { 3984 u8 num_parents = init->num_parents; 3985 const char * const *parent_names = init->parent_names; 3986 const struct clk_hw **parent_hws = init->parent_hws; 3987 const struct clk_parent_data *parent_data = init->parent_data; 3988 int i, ret = 0; 3989 struct clk_parent_map *parents, *parent; 3990 3991 if (!num_parents) 3992 return 0; 3993 3994 /* 3995 * Avoid unnecessary string look-ups of clk_core's possible parents by 3996 * having a cache of names/clk_hw pointers to clk_core pointers. 3997 */ 3998 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); 3999 core->parents = parents; 4000 if (!parents) 4001 return -ENOMEM; 4002 4003 /* Copy everything over because it might be __initdata */ 4004 for (i = 0, parent = parents; i < num_parents; i++, parent++) { 4005 parent->index = -1; 4006 if (parent_names) { 4007 /* throw a WARN if any entries are NULL */ 4008 WARN(!parent_names[i], 4009 "%s: invalid NULL in %s's .parent_names\n", 4010 __func__, core->name); 4011 ret = clk_cpy_name(&parent->name, parent_names[i], 4012 true); 4013 } else if (parent_data) { 4014 parent->hw = parent_data[i].hw; 4015 parent->index = parent_data[i].index; 4016 ret = clk_cpy_name(&parent->fw_name, 4017 parent_data[i].fw_name, false); 4018 if (!ret) 4019 ret = clk_cpy_name(&parent->name, 4020 parent_data[i].name, 4021 false); 4022 } else if (parent_hws) { 4023 parent->hw = parent_hws[i]; 4024 } else { 4025 ret = -EINVAL; 4026 WARN(1, "Must specify parents if num_parents > 0\n"); 4027 } 4028 4029 if (ret) { 4030 do { 4031 kfree_const(parents[i].name); 4032 kfree_const(parents[i].fw_name); 4033 } while (--i >= 0); 4034 kfree(parents); 4035 4036 return ret; 4037 } 4038 } 4039 4040 return 0; 4041 } 4042 4043 static void clk_core_free_parent_map(struct clk_core *core) 4044 { 4045 int i = core->num_parents; 4046 4047 if (!core->num_parents) 4048 return; 4049 4050 while (--i >= 0) { 4051 kfree_const(core->parents[i].name); 4052 kfree_const(core->parents[i].fw_name); 4053 } 4054 4055 kfree(core->parents); 4056 } 4057 4058 static struct clk * 4059 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) 4060 { 4061 int ret; 4062 struct clk_core *core; 4063 const struct clk_init_data *init = hw->init; 4064 4065 /* 4066 * The init data is not supposed to be used outside of registration path. 4067 * Set it to NULL so that provider drivers can't use it either and so that 4068 * we catch use of hw->init early on in the core. 
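 *
 * This matters because providers routinely point hw->init at short-lived
 * data. A hypothetical registration (all names made up) looks like:
 *
 *   struct clk_init_data init = { .name = "foo", .ops = &foo_ops };
 *
 *   foo->hw.init = &init;
 *   ret = clk_hw_register(dev, &foo->hw);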
4069 */ 4070 hw->init = NULL; 4071 4072 core = kzalloc(sizeof(*core), GFP_KERNEL); 4073 if (!core) { 4074 ret = -ENOMEM; 4075 goto fail_out; 4076 } 4077 4078 core->name = kstrdup_const(init->name, GFP_KERNEL); 4079 if (!core->name) { 4080 ret = -ENOMEM; 4081 goto fail_name; 4082 } 4083 4084 if (WARN_ON(!init->ops)) { 4085 ret = -EINVAL; 4086 goto fail_ops; 4087 } 4088 core->ops = init->ops; 4089 4090 if (dev && pm_runtime_enabled(dev)) 4091 core->rpm_enabled = true; 4092 core->dev = dev; 4093 core->of_node = np; 4094 if (dev && dev->driver) 4095 core->owner = dev->driver->owner; 4096 core->hw = hw; 4097 core->flags = init->flags; 4098 core->num_parents = init->num_parents; 4099 core->min_rate = 0; 4100 core->max_rate = ULONG_MAX; 4101 4102 ret = clk_core_populate_parent_map(core, init); 4103 if (ret) 4104 goto fail_parents; 4105 4106 INIT_HLIST_HEAD(&core->clks); 4107 4108 /* 4109 * Don't call clk_hw_create_clk() here because that would pin the 4110 * provider module to itself and prevent it from ever being removed. 4111 */ 4112 hw->clk = alloc_clk(core, NULL, NULL); 4113 if (IS_ERR(hw->clk)) { 4114 ret = PTR_ERR(hw->clk); 4115 goto fail_create_clk; 4116 } 4117 4118 clk_core_link_consumer(core, hw->clk); 4119 4120 ret = __clk_core_init(core); 4121 if (!ret) 4122 return hw->clk; 4123 4124 clk_prepare_lock(); 4125 clk_core_unlink_consumer(hw->clk); 4126 clk_prepare_unlock(); 4127 4128 free_clk(hw->clk); 4129 hw->clk = NULL; 4130 4131 fail_create_clk: 4132 clk_core_free_parent_map(core); 4133 fail_parents: 4134 fail_ops: 4135 kfree_const(core->name); 4136 fail_name: 4137 kfree(core); 4138 fail_out: 4139 return ERR_PTR(ret); 4140 } 4141 4142 /** 4143 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent 4144 * @dev: Device to get device node of 4145 * 4146 * Return: device node pointer of @dev, or the device node pointer of 4147 * @dev->parent if dev doesn't have a device node, or NULL if neither 4148 * @dev or @dev->parent have a device node. 4149 */ 4150 static struct device_node *dev_or_parent_of_node(struct device *dev) 4151 { 4152 struct device_node *np; 4153 4154 if (!dev) 4155 return NULL; 4156 4157 np = dev_of_node(dev); 4158 if (!np) 4159 np = dev_of_node(dev->parent); 4160 4161 return np; 4162 } 4163 4164 /** 4165 * clk_register - allocate a new clock, register it and return an opaque cookie 4166 * @dev: device that is registering this clock 4167 * @hw: link to hardware-specific clock data 4168 * 4169 * clk_register is the *deprecated* interface for populating the clock tree with 4170 * new clock nodes. Use clk_hw_register() instead. 4171 * 4172 * Returns: a pointer to the newly allocated struct clk which 4173 * cannot be dereferenced by driver code but may be used in conjunction with the 4174 * rest of the clock API. In the event of an error clk_register will return an 4175 * error code; drivers must test for an error code after calling clk_register. 4176 */ 4177 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 4178 { 4179 return __clk_register(dev, dev_or_parent_of_node(dev), hw); 4180 } 4181 EXPORT_SYMBOL_GPL(clk_register); 4182 4183 /** 4184 * clk_hw_register - register a clk_hw and return an error code 4185 * @dev: device that is registering this clock 4186 * @hw: link to hardware-specific clock data 4187 * 4188 * clk_hw_register is the primary interface for populating the clock tree with 4189 * new clock nodes. It returns an integer equal to zero indicating success or 4190 * less than zero indicating failure. 
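 *
 * A minimal illustrative sketch (hypothetical foo_* names; foo_ops is assumed
 * to be a struct clk_ops defined by the provider driver):
 *
 *   static struct clk_hw foo_hw;
 *
 *   static int foo_probe(struct platform_device *pdev)
 *   {
 *           static const struct clk_init_data init = {
 *                   .name = "foo",
 *                   .ops = &foo_ops,
 *           };
 *
 *           foo_hw.init = &init;
 *
 *           return clk_hw_register(&pdev->dev, &foo_hw);
 *   }
 *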
Drivers must test for an error code after 4191 * calling clk_hw_register(). 4192 */ 4193 int clk_hw_register(struct device *dev, struct clk_hw *hw) 4194 { 4195 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev), 4196 hw)); 4197 } 4198 EXPORT_SYMBOL_GPL(clk_hw_register); 4199 4200 /* 4201 * of_clk_hw_register - register a clk_hw and return an error code 4202 * @node: device_node of device that is registering this clock 4203 * @hw: link to hardware-specific clock data 4204 * 4205 * of_clk_hw_register() is the primary interface for populating the clock tree 4206 * with new clock nodes when a struct device is not available, but a struct 4207 * device_node is. It returns an integer equal to zero indicating success or 4208 * less than zero indicating failure. Drivers must test for an error code after 4209 * calling of_clk_hw_register(). 4210 */ 4211 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) 4212 { 4213 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); 4214 } 4215 EXPORT_SYMBOL_GPL(of_clk_hw_register); 4216 4217 /* Free memory allocated for a clock. */ 4218 static void __clk_release(struct kref *ref) 4219 { 4220 struct clk_core *core = container_of(ref, struct clk_core, ref); 4221 4222 lockdep_assert_held(&prepare_lock); 4223 4224 clk_core_free_parent_map(core); 4225 kfree_const(core->name); 4226 kfree(core); 4227 } 4228 4229 /* 4230 * Empty clk_ops for unregistered clocks. These are used temporarily 4231 * after clk_unregister() was called on a clock and until last clock 4232 * consumer calls clk_put() and the struct clk object is freed. 4233 */ 4234 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 4235 { 4236 return -ENXIO; 4237 } 4238 4239 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 4240 { 4241 WARN_ON_ONCE(1); 4242 } 4243 4244 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 4245 unsigned long parent_rate) 4246 { 4247 return -ENXIO; 4248 } 4249 4250 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 4251 { 4252 return -ENXIO; 4253 } 4254 4255 static const struct clk_ops clk_nodrv_ops = { 4256 .enable = clk_nodrv_prepare_enable, 4257 .disable = clk_nodrv_disable_unprepare, 4258 .prepare = clk_nodrv_prepare_enable, 4259 .unprepare = clk_nodrv_disable_unprepare, 4260 .set_rate = clk_nodrv_set_rate, 4261 .set_parent = clk_nodrv_set_parent, 4262 }; 4263 4264 static void clk_core_evict_parent_cache_subtree(struct clk_core *root, 4265 const struct clk_core *target) 4266 { 4267 int i; 4268 struct clk_core *child; 4269 4270 for (i = 0; i < root->num_parents; i++) 4271 if (root->parents[i].core == target) 4272 root->parents[i].core = NULL; 4273 4274 hlist_for_each_entry(child, &root->children, child_node) 4275 clk_core_evict_parent_cache_subtree(child, target); 4276 } 4277 4278 /* Remove this clk from all parent caches */ 4279 static void clk_core_evict_parent_cache(struct clk_core *core) 4280 { 4281 const struct hlist_head **lists; 4282 struct clk_core *root; 4283 4284 lockdep_assert_held(&prepare_lock); 4285 4286 for (lists = all_lists; *lists; lists++) 4287 hlist_for_each_entry(root, *lists, child_node) 4288 clk_core_evict_parent_cache_subtree(root, core); 4289 4290 } 4291 4292 /** 4293 * clk_unregister - unregister a currently registered clock 4294 * @clk: clock to unregister 4295 */ 4296 void clk_unregister(struct clk *clk) 4297 { 4298 unsigned long flags; 4299 const struct clk_ops *ops; 4300 4301 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4302 return; 4303 4304 clk_debug_unregister(clk->core); 4305 
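	/*
	 * Everything below runs under prepare_lock: the ops are swapped for
	 * stub ops that fail or warn, remaining children are reparented to
	 * the orphan list, parent caches referring to this clock are purged
	 * and the core reference is dropped, so consumers that still hold a
	 * clk only ever see a consistently "unregistered" clock.
	 */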
4306 clk_prepare_lock(); 4307 4308 ops = clk->core->ops; 4309 if (ops == &clk_nodrv_ops) { 4310 pr_err("%s: unregistered clock: %s\n", __func__, 4311 clk->core->name); 4312 goto unlock; 4313 } 4314 /* 4315 * Assign empty clock ops for consumers that might still hold 4316 * a reference to this clock. 4317 */ 4318 flags = clk_enable_lock(); 4319 clk->core->ops = &clk_nodrv_ops; 4320 clk_enable_unlock(flags); 4321 4322 if (ops->terminate) 4323 ops->terminate(clk->core->hw); 4324 4325 if (!hlist_empty(&clk->core->children)) { 4326 struct clk_core *child; 4327 struct hlist_node *t; 4328 4329 /* Reparent all children to the orphan list. */ 4330 hlist_for_each_entry_safe(child, t, &clk->core->children, 4331 child_node) 4332 clk_core_set_parent_nolock(child, NULL); 4333 } 4334 4335 clk_core_evict_parent_cache(clk->core); 4336 4337 hlist_del_init(&clk->core->child_node); 4338 4339 if (clk->core->prepare_count) 4340 pr_warn("%s: unregistering prepared clock: %s\n", 4341 __func__, clk->core->name); 4342 4343 if (clk->core->protect_count) 4344 pr_warn("%s: unregistering protected clock: %s\n", 4345 __func__, clk->core->name); 4346 4347 kref_put(&clk->core->ref, __clk_release); 4348 free_clk(clk); 4349 unlock: 4350 clk_prepare_unlock(); 4351 } 4352 EXPORT_SYMBOL_GPL(clk_unregister); 4353 4354 /** 4355 * clk_hw_unregister - unregister a currently registered clk_hw 4356 * @hw: hardware-specific clock data to unregister 4357 */ 4358 void clk_hw_unregister(struct clk_hw *hw) 4359 { 4360 clk_unregister(hw->clk); 4361 } 4362 EXPORT_SYMBOL_GPL(clk_hw_unregister); 4363 4364 static void devm_clk_unregister_cb(struct device *dev, void *res) 4365 { 4366 clk_unregister(*(struct clk **)res); 4367 } 4368 4369 static void devm_clk_hw_unregister_cb(struct device *dev, void *res) 4370 { 4371 clk_hw_unregister(*(struct clk_hw **)res); 4372 } 4373 4374 /** 4375 * devm_clk_register - resource managed clk_register() 4376 * @dev: device that is registering this clock 4377 * @hw: link to hardware-specific clock data 4378 * 4379 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. 4380 * 4381 * Clocks returned from this function are automatically clk_unregister()ed on 4382 * driver detach. See clk_register() for more information. 4383 */ 4384 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 4385 { 4386 struct clk *clk; 4387 struct clk **clkp; 4388 4389 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL); 4390 if (!clkp) 4391 return ERR_PTR(-ENOMEM); 4392 4393 clk = clk_register(dev, hw); 4394 if (!IS_ERR(clk)) { 4395 *clkp = clk; 4396 devres_add(dev, clkp); 4397 } else { 4398 devres_free(clkp); 4399 } 4400 4401 return clk; 4402 } 4403 EXPORT_SYMBOL_GPL(devm_clk_register); 4404 4405 /** 4406 * devm_clk_hw_register - resource managed clk_hw_register() 4407 * @dev: device that is registering this clock 4408 * @hw: link to hardware-specific clock data 4409 * 4410 * Managed clk_hw_register(). Clocks registered by this function are 4411 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 4412 * for more information. 
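 *
 * Illustrative sketch (hypothetical foo_* names; the clk_init_data is assumed
 * to have been set up before the call):
 *
 *   ret = devm_clk_hw_register(&pdev->dev, &foo->hw);
 *   if (ret)
 *           return ret;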
4413 */ 4414 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 4415 { 4416 struct clk_hw **hwp; 4417 int ret; 4418 4419 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL); 4420 if (!hwp) 4421 return -ENOMEM; 4422 4423 ret = clk_hw_register(dev, hw); 4424 if (!ret) { 4425 *hwp = hw; 4426 devres_add(dev, hwp); 4427 } else { 4428 devres_free(hwp); 4429 } 4430 4431 return ret; 4432 } 4433 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 4434 4435 static void devm_clk_release(struct device *dev, void *res) 4436 { 4437 clk_put(*(struct clk **)res); 4438 } 4439 4440 /** 4441 * devm_clk_hw_get_clk - resource managed clk_hw_get_clk() 4442 * @dev: device that is registering this clock 4443 * @hw: clk_hw associated with the clk being consumed 4444 * @con_id: connection ID string on device 4445 * 4446 * Managed clk_hw_get_clk(). Clocks got with this function are 4447 * automatically clk_put() on driver detach. See clk_put() 4448 * for more information. 4449 */ 4450 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, 4451 const char *con_id) 4452 { 4453 struct clk *clk; 4454 struct clk **clkp; 4455 4456 /* This should not happen because it would mean we have drivers 4457 * passing around clk_hw pointers instead of having the caller use 4458 * proper clk_get() style APIs 4459 */ 4460 WARN_ON_ONCE(dev != hw->core->dev); 4461 4462 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 4463 if (!clkp) 4464 return ERR_PTR(-ENOMEM); 4465 4466 clk = clk_hw_get_clk(hw, con_id); 4467 if (!IS_ERR(clk)) { 4468 *clkp = clk; 4469 devres_add(dev, clkp); 4470 } else { 4471 devres_free(clkp); 4472 } 4473 4474 return clk; 4475 } 4476 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk); 4477 4478 /* 4479 * clkdev helpers 4480 */ 4481 4482 void __clk_put(struct clk *clk) 4483 { 4484 struct module *owner; 4485 4486 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4487 return; 4488 4489 clk_prepare_lock(); 4490 4491 /* 4492 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 4493 * given user should be balanced with calls to clk_rate_exclusive_put() 4494 * and by that same consumer 4495 */ 4496 if (WARN_ON(clk->exclusive_count)) { 4497 /* We voiced our concern, let's sanitize the situation */ 4498 clk->core->protect_count -= (clk->exclusive_count - 1); 4499 clk_core_rate_unprotect(clk->core); 4500 clk->exclusive_count = 0; 4501 } 4502 4503 hlist_del(&clk->clks_node); 4504 4505 /* If we had any boundaries on that clock, let's drop them. */ 4506 if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) 4507 clk_set_rate_range_nolock(clk, 0, ULONG_MAX); 4508 4509 owner = clk->core->owner; 4510 kref_put(&clk->core->ref, __clk_release); 4511 4512 clk_prepare_unlock(); 4513 4514 module_put(owner); 4515 4516 free_clk(clk); 4517 } 4518 4519 /*** clk rate change notifiers ***/ 4520 4521 /** 4522 * clk_notifier_register - add a clk rate change notifier 4523 * @clk: struct clk * to watch 4524 * @nb: struct notifier_block * with callback info 4525 * 4526 * Request notification when clk's rate changes. This uses an SRCU 4527 * notifier because we want it to block and notifier unregistrations are 4528 * uncommon. The callbacks associated with the notifier must not 4529 * re-enter into the clk framework by calling any top-level clk APIs; 4530 * this will cause a nested prepare_lock mutex. 
4531 * 4532 * In all notification cases (pre, post and abort rate change) the original 4533 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 4534 * and the new frequency is passed via struct clk_notifier_data.new_rate. 4535 * 4536 * clk_notifier_register() must be called from non-atomic context. 4537 * Returns -EINVAL if called with null arguments, -ENOMEM upon 4538 * allocation failure; otherwise, passes along the return value of 4539 * srcu_notifier_chain_register(). 4540 */ 4541 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 4542 { 4543 struct clk_notifier *cn; 4544 int ret = -ENOMEM; 4545 4546 if (!clk || !nb) 4547 return -EINVAL; 4548 4549 clk_prepare_lock(); 4550 4551 /* search the list of notifiers for this clk */ 4552 list_for_each_entry(cn, &clk_notifier_list, node) 4553 if (cn->clk == clk) 4554 goto found; 4555 4556 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 4557 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 4558 if (!cn) 4559 goto out; 4560 4561 cn->clk = clk; 4562 srcu_init_notifier_head(&cn->notifier_head); 4563 4564 list_add(&cn->node, &clk_notifier_list); 4565 4566 found: 4567 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 4568 4569 clk->core->notifier_count++; 4570 4571 out: 4572 clk_prepare_unlock(); 4573 4574 return ret; 4575 } 4576 EXPORT_SYMBOL_GPL(clk_notifier_register); 4577 4578 /** 4579 * clk_notifier_unregister - remove a clk rate change notifier 4580 * @clk: struct clk * 4581 * @nb: struct notifier_block * with callback info 4582 * 4583 * Request no further notification for changes to 'clk' and frees memory 4584 * allocated in clk_notifier_register. 4585 * 4586 * Returns -EINVAL if called with null arguments; otherwise, passes 4587 * along the return value of srcu_notifier_chain_unregister(). 
4588 */ 4589 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 4590 { 4591 struct clk_notifier *cn; 4592 int ret = -ENOENT; 4593 4594 if (!clk || !nb) 4595 return -EINVAL; 4596 4597 clk_prepare_lock(); 4598 4599 list_for_each_entry(cn, &clk_notifier_list, node) { 4600 if (cn->clk == clk) { 4601 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 4602 4603 clk->core->notifier_count--; 4604 4605 /* XXX the notifier code should handle this better */ 4606 if (!cn->notifier_head.head) { 4607 srcu_cleanup_notifier_head(&cn->notifier_head); 4608 list_del(&cn->node); 4609 kfree(cn); 4610 } 4611 break; 4612 } 4613 } 4614 4615 clk_prepare_unlock(); 4616 4617 return ret; 4618 } 4619 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 4620 4621 struct clk_notifier_devres { 4622 struct clk *clk; 4623 struct notifier_block *nb; 4624 }; 4625 4626 static void devm_clk_notifier_release(struct device *dev, void *res) 4627 { 4628 struct clk_notifier_devres *devres = res; 4629 4630 clk_notifier_unregister(devres->clk, devres->nb); 4631 } 4632 4633 int devm_clk_notifier_register(struct device *dev, struct clk *clk, 4634 struct notifier_block *nb) 4635 { 4636 struct clk_notifier_devres *devres; 4637 int ret; 4638 4639 devres = devres_alloc(devm_clk_notifier_release, 4640 sizeof(*devres), GFP_KERNEL); 4641 4642 if (!devres) 4643 return -ENOMEM; 4644 4645 ret = clk_notifier_register(clk, nb); 4646 if (!ret) { 4647 devres->clk = clk; 4648 devres->nb = nb; 4649 } else { 4650 devres_free(devres); 4651 } 4652 4653 return ret; 4654 } 4655 EXPORT_SYMBOL_GPL(devm_clk_notifier_register); 4656 4657 #ifdef CONFIG_OF 4658 static void clk_core_reparent_orphans(void) 4659 { 4660 clk_prepare_lock(); 4661 clk_core_reparent_orphans_nolock(); 4662 clk_prepare_unlock(); 4663 } 4664 4665 /** 4666 * struct of_clk_provider - Clock provider registration structure 4667 * @link: Entry in global list of clock providers 4668 * @node: Pointer to device tree node of clock provider 4669 * @get: Get clock callback. Returns NULL or a struct clk for the 4670 * given clock specifier 4671 * @get_hw: Get clk_hw callback. 
Returns NULL, ERR_PTR or a 4672 * struct clk_hw for the given clock specifier 4673 * @data: context pointer to be passed into @get callback 4674 */ 4675 struct of_clk_provider { 4676 struct list_head link; 4677 4678 struct device_node *node; 4679 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 4680 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 4681 void *data; 4682 }; 4683 4684 extern struct of_device_id __clk_of_table; 4685 static const struct of_device_id __clk_of_table_sentinel 4686 __used __section("__clk_of_table_end"); 4687 4688 static LIST_HEAD(of_clk_providers); 4689 static DEFINE_MUTEX(of_clk_mutex); 4690 4691 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 4692 void *data) 4693 { 4694 return data; 4695 } 4696 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 4697 4698 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 4699 { 4700 return data; 4701 } 4702 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 4703 4704 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 4705 { 4706 struct clk_onecell_data *clk_data = data; 4707 unsigned int idx = clkspec->args[0]; 4708 4709 if (idx >= clk_data->clk_num) { 4710 pr_err("%s: invalid clock index %u\n", __func__, idx); 4711 return ERR_PTR(-EINVAL); 4712 } 4713 4714 return clk_data->clks[idx]; 4715 } 4716 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 4717 4718 struct clk_hw * 4719 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 4720 { 4721 struct clk_hw_onecell_data *hw_data = data; 4722 unsigned int idx = clkspec->args[0]; 4723 4724 if (idx >= hw_data->num) { 4725 pr_err("%s: invalid index %u\n", __func__, idx); 4726 return ERR_PTR(-EINVAL); 4727 } 4728 4729 return hw_data->hws[idx]; 4730 } 4731 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 4732 4733 /** 4734 * of_clk_add_provider() - Register a clock provider for a node 4735 * @np: Device node pointer associated with clock provider 4736 * @clk_src_get: callback for decoding clock 4737 * @data: context pointer for @clk_src_get callback. 4738 * 4739 * This function is *deprecated*. Use of_clk_add_hw_provider() instead. 4740 */ 4741 int of_clk_add_provider(struct device_node *np, 4742 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 4743 void *data), 4744 void *data) 4745 { 4746 struct of_clk_provider *cp; 4747 int ret; 4748 4749 if (!np) 4750 return 0; 4751 4752 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4753 if (!cp) 4754 return -ENOMEM; 4755 4756 cp->node = of_node_get(np); 4757 cp->data = data; 4758 cp->get = clk_src_get; 4759 4760 mutex_lock(&of_clk_mutex); 4761 list_add(&cp->link, &of_clk_providers); 4762 mutex_unlock(&of_clk_mutex); 4763 pr_debug("Added clock from %pOF\n", np); 4764 4765 clk_core_reparent_orphans(); 4766 4767 ret = of_clk_set_defaults(np, true); 4768 if (ret < 0) 4769 of_clk_del_provider(np); 4770 4771 fwnode_dev_initialized(&np->fwnode, true); 4772 4773 return ret; 4774 } 4775 EXPORT_SYMBOL_GPL(of_clk_add_provider); 4776 4777 /** 4778 * of_clk_add_hw_provider() - Register a clock provider for a node 4779 * @np: Device node pointer associated with clock provider 4780 * @get: callback for decoding clk_hw 4781 * @data: context pointer for @get callback. 
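 *
 * Illustrative sketch, assuming hw_data points to a struct
 * clk_hw_onecell_data that the provider has already populated:
 *
 *   ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 *   if (ret)
 *           return ret;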
4782 */ 4783 int of_clk_add_hw_provider(struct device_node *np, 4784 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4785 void *data), 4786 void *data) 4787 { 4788 struct of_clk_provider *cp; 4789 int ret; 4790 4791 if (!np) 4792 return 0; 4793 4794 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4795 if (!cp) 4796 return -ENOMEM; 4797 4798 cp->node = of_node_get(np); 4799 cp->data = data; 4800 cp->get_hw = get; 4801 4802 mutex_lock(&of_clk_mutex); 4803 list_add(&cp->link, &of_clk_providers); 4804 mutex_unlock(&of_clk_mutex); 4805 pr_debug("Added clk_hw provider from %pOF\n", np); 4806 4807 clk_core_reparent_orphans(); 4808 4809 ret = of_clk_set_defaults(np, true); 4810 if (ret < 0) 4811 of_clk_del_provider(np); 4812 4813 fwnode_dev_initialized(&np->fwnode, true); 4814 4815 return ret; 4816 } 4817 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 4818 4819 static void devm_of_clk_release_provider(struct device *dev, void *res) 4820 { 4821 of_clk_del_provider(*(struct device_node **)res); 4822 } 4823 4824 /* 4825 * We allow a child device to use its parent device as the clock provider node 4826 * for cases like MFD sub-devices where the child device driver wants to use 4827 * devm_*() APIs but not list the device in DT as a sub-node. 4828 */ 4829 static struct device_node *get_clk_provider_node(struct device *dev) 4830 { 4831 struct device_node *np, *parent_np; 4832 4833 np = dev->of_node; 4834 parent_np = dev->parent ? dev->parent->of_node : NULL; 4835 4836 if (!of_find_property(np, "#clock-cells", NULL)) 4837 if (of_find_property(parent_np, "#clock-cells", NULL)) 4838 np = parent_np; 4839 4840 return np; 4841 } 4842 4843 /** 4844 * devm_of_clk_add_hw_provider() - Managed clk provider node registration 4845 * @dev: Device acting as the clock provider (used for DT node and lifetime) 4846 * @get: callback for decoding clk_hw 4847 * @data: context pointer for @get callback 4848 * 4849 * Registers clock provider for given device's node. If the device has no DT 4850 * node or if the device node lacks of clock provider information (#clock-cells) 4851 * then the parent device's node is scanned for this information. If parent node 4852 * has the #clock-cells then it is used in registration. Provider is 4853 * automatically released at device exit. 4854 * 4855 * Return: 0 on success or an errno on failure. 
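 *
 * Illustrative sketch (hypothetical foo_* names); a provider that exposes a
 * single clock can pass of_clk_hw_simple_get() as the @get callback:
 *
 *   return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
 *                                      &foo->hw);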
4856 */ 4857 int devm_of_clk_add_hw_provider(struct device *dev, 4858 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4859 void *data), 4860 void *data) 4861 { 4862 struct device_node **ptr, *np; 4863 int ret; 4864 4865 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 4866 GFP_KERNEL); 4867 if (!ptr) 4868 return -ENOMEM; 4869 4870 np = get_clk_provider_node(dev); 4871 ret = of_clk_add_hw_provider(np, get, data); 4872 if (!ret) { 4873 *ptr = np; 4874 devres_add(dev, ptr); 4875 } else { 4876 devres_free(ptr); 4877 } 4878 4879 return ret; 4880 } 4881 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 4882 4883 /** 4884 * of_clk_del_provider() - Remove a previously registered clock provider 4885 * @np: Device node pointer associated with clock provider 4886 */ 4887 void of_clk_del_provider(struct device_node *np) 4888 { 4889 struct of_clk_provider *cp; 4890 4891 if (!np) 4892 return; 4893 4894 mutex_lock(&of_clk_mutex); 4895 list_for_each_entry(cp, &of_clk_providers, link) { 4896 if (cp->node == np) { 4897 list_del(&cp->link); 4898 fwnode_dev_initialized(&np->fwnode, false); 4899 of_node_put(cp->node); 4900 kfree(cp); 4901 break; 4902 } 4903 } 4904 mutex_unlock(&of_clk_mutex); 4905 } 4906 EXPORT_SYMBOL_GPL(of_clk_del_provider); 4907 4908 /** 4909 * of_parse_clkspec() - Parse a DT clock specifier for a given device node 4910 * @np: device node to parse clock specifier from 4911 * @index: index of phandle to parse clock out of. If index < 0, @name is used 4912 * @name: clock name to find and parse. If name is NULL, the index is used 4913 * @out_args: Result of parsing the clock specifier 4914 * 4915 * Parses a device node's "clocks" and "clock-names" properties to find the 4916 * phandle and cells for the index or name that is desired. The resulting clock 4917 * specifier is placed into @out_args, or an errno is returned when there's a 4918 * parsing error. The @index argument is ignored if @name is non-NULL. 4919 * 4920 * Example: 4921 * 4922 * phandle1: clock-controller@1 { 4923 * #clock-cells = <2>; 4924 * } 4925 * 4926 * phandle2: clock-controller@2 { 4927 * #clock-cells = <1>; 4928 * } 4929 * 4930 * clock-consumer@3 { 4931 * clocks = <&phandle1 1 2 &phandle2 3>; 4932 * clock-names = "name1", "name2"; 4933 * } 4934 * 4935 * To get a device_node for `clock-controller@2' node you may call this 4936 * function a few different ways: 4937 * 4938 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); 4939 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); 4940 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); 4941 * 4942 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT 4943 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in 4944 * the "clock-names" property of @np. 4945 */ 4946 static int of_parse_clkspec(const struct device_node *np, int index, 4947 const char *name, struct of_phandle_args *out_args) 4948 { 4949 int ret = -ENOENT; 4950 4951 /* Walk up the tree of devices looking for a clock property that matches */ 4952 while (np) { 4953 /* 4954 * For named clocks, first look up the name in the 4955 * "clock-names" property. If it cannot be found, then index 4956 * will be an error code and of_parse_phandle_with_args() will 4957 * return -EINVAL. 
4958 */ 4959 if (name) 4960 index = of_property_match_string(np, "clock-names", name); 4961 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 4962 index, out_args); 4963 if (!ret) 4964 break; 4965 if (name && index >= 0) 4966 break; 4967 4968 /* 4969 * No matching clock found on this node. If the parent node 4970 * has a "clock-ranges" property, then we can try one of its 4971 * clocks. 4972 */ 4973 np = np->parent; 4974 if (np && !of_get_property(np, "clock-ranges", NULL)) 4975 break; 4976 index = 0; 4977 } 4978 4979 return ret; 4980 } 4981 4982 static struct clk_hw * 4983 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 4984 struct of_phandle_args *clkspec) 4985 { 4986 struct clk *clk; 4987 4988 if (provider->get_hw) 4989 return provider->get_hw(clkspec, provider->data); 4990 4991 clk = provider->get(clkspec, provider->data); 4992 if (IS_ERR(clk)) 4993 return ERR_CAST(clk); 4994 return __clk_get_hw(clk); 4995 } 4996 4997 static struct clk_hw * 4998 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 4999 { 5000 struct of_clk_provider *provider; 5001 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); 5002 5003 if (!clkspec) 5004 return ERR_PTR(-EINVAL); 5005 5006 mutex_lock(&of_clk_mutex); 5007 list_for_each_entry(provider, &of_clk_providers, link) { 5008 if (provider->node == clkspec->np) { 5009 hw = __of_clk_get_hw_from_provider(provider, clkspec); 5010 if (!IS_ERR(hw)) 5011 break; 5012 } 5013 } 5014 mutex_unlock(&of_clk_mutex); 5015 5016 return hw; 5017 } 5018 5019 /** 5020 * of_clk_get_from_provider() - Lookup a clock from a clock provider 5021 * @clkspec: pointer to a clock specifier data structure 5022 * 5023 * This function looks up a struct clk from the registered list of clock 5024 * providers, an input is a clock specifier data structure as returned 5025 * from the of_parse_phandle_with_args() function call. 5026 */ 5027 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 5028 { 5029 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); 5030 5031 return clk_hw_create_clk(NULL, hw, NULL, __func__); 5032 } 5033 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 5034 5035 struct clk_hw *of_clk_get_hw(struct device_node *np, int index, 5036 const char *con_id) 5037 { 5038 int ret; 5039 struct clk_hw *hw; 5040 struct of_phandle_args clkspec; 5041 5042 ret = of_parse_clkspec(np, index, con_id, &clkspec); 5043 if (ret) 5044 return ERR_PTR(ret); 5045 5046 hw = of_clk_get_hw_from_clkspec(&clkspec); 5047 of_node_put(clkspec.np); 5048 5049 return hw; 5050 } 5051 5052 static struct clk *__of_clk_get(struct device_node *np, 5053 int index, const char *dev_id, 5054 const char *con_id) 5055 { 5056 struct clk_hw *hw = of_clk_get_hw(np, index, con_id); 5057 5058 return clk_hw_create_clk(NULL, hw, dev_id, con_id); 5059 } 5060 5061 struct clk *of_clk_get(struct device_node *np, int index) 5062 { 5063 return __of_clk_get(np, index, np->full_name, NULL); 5064 } 5065 EXPORT_SYMBOL(of_clk_get); 5066 5067 /** 5068 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node 5069 * @np: pointer to clock consumer node 5070 * @name: name of consumer's clock input, or NULL for the first clock reference 5071 * 5072 * This function parses the clocks and clock-names properties, 5073 * and uses them to look up the struct clk from the registered list of clock 5074 * providers. 
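 *
 * Illustrative sketch; the "baud" clock name is made up for the example and
 * must match an entry in the node's clock-names property:
 *
 *   struct clk *clk = of_clk_get_by_name(np, "baud");
 *
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);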
5075 */ 5076 struct clk *of_clk_get_by_name(struct device_node *np, const char *name) 5077 { 5078 if (!np) 5079 return ERR_PTR(-ENOENT); 5080 5081 return __of_clk_get(np, 0, np->full_name, name); 5082 } 5083 EXPORT_SYMBOL(of_clk_get_by_name); 5084 5085 /** 5086 * of_clk_get_parent_count() - Count the number of clocks a device node has 5087 * @np: device node to count 5088 * 5089 * Returns: The number of clocks that are possible parents of this node 5090 */ 5091 unsigned int of_clk_get_parent_count(const struct device_node *np) 5092 { 5093 int count; 5094 5095 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 5096 if (count < 0) 5097 return 0; 5098 5099 return count; 5100 } 5101 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 5102 5103 const char *of_clk_get_parent_name(const struct device_node *np, int index) 5104 { 5105 struct of_phandle_args clkspec; 5106 struct property *prop; 5107 const char *clk_name; 5108 const __be32 *vp; 5109 u32 pv; 5110 int rc; 5111 int count; 5112 struct clk *clk; 5113 5114 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 5115 &clkspec); 5116 if (rc) 5117 return NULL; 5118 5119 index = clkspec.args_count ? clkspec.args[0] : 0; 5120 count = 0; 5121 5122 /* if there is an indices property, use it to transfer the index 5123 * specified into an array offset for the clock-output-names property. 5124 */ 5125 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 5126 if (index == pv) { 5127 index = count; 5128 break; 5129 } 5130 count++; 5131 } 5132 /* We went off the end of 'clock-indices' without finding it */ 5133 if (prop && !vp) 5134 return NULL; 5135 5136 if (of_property_read_string_index(clkspec.np, "clock-output-names", 5137 index, 5138 &clk_name) < 0) { 5139 /* 5140 * Best effort to get the name if the clock has been 5141 * registered with the framework. If the clock isn't 5142 * registered, we return the node name as the name of 5143 * the clock as long as #clock-cells = 0. 5144 */ 5145 clk = of_clk_get_from_provider(&clkspec); 5146 if (IS_ERR(clk)) { 5147 if (clkspec.args_count == 0) 5148 clk_name = clkspec.np->name; 5149 else 5150 clk_name = NULL; 5151 } else { 5152 clk_name = __clk_get_name(clk); 5153 clk_put(clk); 5154 } 5155 } 5156 5157 5158 of_node_put(clkspec.np); 5159 return clk_name; 5160 } 5161 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 5162 5163 /** 5164 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 5165 * number of parents 5166 * @np: Device node pointer associated with clock provider 5167 * @parents: pointer to char array that hold the parents' names 5168 * @size: size of the @parents array 5169 * 5170 * Return: number of parents for the clock node. 5171 */ 5172 int of_clk_parent_fill(struct device_node *np, const char **parents, 5173 unsigned int size) 5174 { 5175 unsigned int i = 0; 5176 5177 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 5178 i++; 5179 5180 return i; 5181 } 5182 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 5183 5184 struct clock_provider { 5185 void (*clk_init_cb)(struct device_node *); 5186 struct device_node *np; 5187 struct list_head node; 5188 }; 5189 5190 /* 5191 * This function looks for a parent clock. If there is one, then it 5192 * checks that the provider for this parent clock was initialized, in 5193 * this case the parent clock will be ready. 
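 *
 * Returns 1 when every listed parent can already be resolved (or when the
 * node has no parent clocks at all), and 0 when at least one lookup still
 * defers with -EPROBE_DEFER.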
5194 */ 5195 static int parent_ready(struct device_node *np) 5196 { 5197 int i = 0; 5198 5199 while (true) { 5200 struct clk *clk = of_clk_get(np, i); 5201 5202 /* this parent is ready we can check the next one */ 5203 if (!IS_ERR(clk)) { 5204 clk_put(clk); 5205 i++; 5206 continue; 5207 } 5208 5209 /* at least one parent is not ready, we exit now */ 5210 if (PTR_ERR(clk) == -EPROBE_DEFER) 5211 return 0; 5212 5213 /* 5214 * Here we make assumption that the device tree is 5215 * written correctly. So an error means that there is 5216 * no more parent. As we didn't exit yet, then the 5217 * previous parent are ready. If there is no clock 5218 * parent, no need to wait for them, then we can 5219 * consider their absence as being ready 5220 */ 5221 return 1; 5222 } 5223 } 5224 5225 /** 5226 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 5227 * @np: Device node pointer associated with clock provider 5228 * @index: clock index 5229 * @flags: pointer to top-level framework flags 5230 * 5231 * Detects if the clock-critical property exists and, if so, sets the 5232 * corresponding CLK_IS_CRITICAL flag. 5233 * 5234 * Do not use this function. It exists only for legacy Device Tree 5235 * bindings, such as the one-clock-per-node style that are outdated. 5236 * Those bindings typically put all clock data into .dts and the Linux 5237 * driver has no clock data, thus making it impossible to set this flag 5238 * correctly from the driver. Only those drivers may call 5239 * of_clk_detect_critical from their setup functions. 5240 * 5241 * Return: error code or zero on success 5242 */ 5243 int of_clk_detect_critical(struct device_node *np, int index, 5244 unsigned long *flags) 5245 { 5246 struct property *prop; 5247 const __be32 *cur; 5248 uint32_t idx; 5249 5250 if (!np || !flags) 5251 return -EINVAL; 5252 5253 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 5254 if (index == idx) 5255 *flags |= CLK_IS_CRITICAL; 5256 5257 return 0; 5258 } 5259 5260 /** 5261 * of_clk_init() - Scan and init clock providers from the DT 5262 * @matches: array of compatible values and init functions for providers. 5263 * 5264 * This function scans the device tree for matching clock providers 5265 * and calls their initialization functions. It also does it by trying 5266 * to follow the dependencies. 
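 *
 * Illustrative sketch (hypothetical foo_* names; foo_register_clks() stands
 * in for the actual clk_hw registration and of_clk_add_hw_provider() call):
 *
 *   static void __init foo_clk_setup(struct device_node *np)
 *   {
 *           foo_register_clks(np);
 *   }
 *   CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_setup);
 *
 * Providers declared with CLK_OF_DECLARE() end up in __clk_of_table, which is
 * what of_clk_init() scans when it is called with a NULL @matches argument.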
5267 */ 5268 void __init of_clk_init(const struct of_device_id *matches) 5269 { 5270 const struct of_device_id *match; 5271 struct device_node *np; 5272 struct clock_provider *clk_provider, *next; 5273 bool is_init_done; 5274 bool force = false; 5275 LIST_HEAD(clk_provider_list); 5276 5277 if (!matches) 5278 matches = &__clk_of_table; 5279 5280 /* First prepare the list of the clocks providers */ 5281 for_each_matching_node_and_match(np, matches, &match) { 5282 struct clock_provider *parent; 5283 5284 if (!of_device_is_available(np)) 5285 continue; 5286 5287 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 5288 if (!parent) { 5289 list_for_each_entry_safe(clk_provider, next, 5290 &clk_provider_list, node) { 5291 list_del(&clk_provider->node); 5292 of_node_put(clk_provider->np); 5293 kfree(clk_provider); 5294 } 5295 of_node_put(np); 5296 return; 5297 } 5298 5299 parent->clk_init_cb = match->data; 5300 parent->np = of_node_get(np); 5301 list_add_tail(&parent->node, &clk_provider_list); 5302 } 5303 5304 while (!list_empty(&clk_provider_list)) { 5305 is_init_done = false; 5306 list_for_each_entry_safe(clk_provider, next, 5307 &clk_provider_list, node) { 5308 if (force || parent_ready(clk_provider->np)) { 5309 5310 /* Don't populate platform devices */ 5311 of_node_set_flag(clk_provider->np, 5312 OF_POPULATED); 5313 5314 clk_provider->clk_init_cb(clk_provider->np); 5315 of_clk_set_defaults(clk_provider->np, true); 5316 5317 list_del(&clk_provider->node); 5318 of_node_put(clk_provider->np); 5319 kfree(clk_provider); 5320 is_init_done = true; 5321 } 5322 } 5323 5324 /* 5325 * We didn't manage to initialize any of the 5326 * remaining providers during the last loop, so now we 5327 * initialize all the remaining ones unconditionally 5328 * in case the clock parent was not mandatory 5329 */ 5330 if (!is_init_done) 5331 force = true; 5332 } 5333 } 5334 #endif 5335