1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 4 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 5 * 6 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst 7 */ 8 9 #include <linux/clk.h> 10 #include <linux/clk-provider.h> 11 #include <linux/clk/clk-conf.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/spinlock.h> 15 #include <linux/err.h> 16 #include <linux/list.h> 17 #include <linux/slab.h> 18 #include <linux/of.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/sched.h> 23 #include <linux/clkdev.h> 24 25 #include "clk.h" 26 27 static DEFINE_SPINLOCK(enable_lock); 28 static DEFINE_MUTEX(prepare_lock); 29 30 static struct task_struct *prepare_owner; 31 static struct task_struct *enable_owner; 32 33 static int prepare_refcnt; 34 static int enable_refcnt; 35 36 static HLIST_HEAD(clk_root_list); 37 static HLIST_HEAD(clk_orphan_list); 38 static LIST_HEAD(clk_notifier_list); 39 40 static const struct hlist_head *all_lists[] = { 41 &clk_root_list, 42 &clk_orphan_list, 43 NULL, 44 }; 45 46 /*** private data structures ***/ 47 48 struct clk_parent_map { 49 const struct clk_hw *hw; 50 struct clk_core *core; 51 const char *fw_name; 52 const char *name; 53 int index; 54 }; 55 56 struct clk_core { 57 const char *name; 58 const struct clk_ops *ops; 59 struct clk_hw *hw; 60 struct module *owner; 61 struct device *dev; 62 struct device_node *of_node; 63 struct clk_core *parent; 64 struct clk_parent_map *parents; 65 u8 num_parents; 66 u8 new_parent_index; 67 unsigned long rate; 68 unsigned long req_rate; 69 unsigned long new_rate; 70 struct clk_core *new_parent; 71 struct clk_core *new_child; 72 unsigned long flags; 73 bool orphan; 74 bool rpm_enabled; 75 unsigned int enable_count; 76 unsigned int prepare_count; 77 unsigned int protect_count; 78 unsigned long min_rate; 79 unsigned long max_rate; 80 unsigned long accuracy; 81 int phase; 82 struct clk_duty duty; 83 struct hlist_head children; 84 struct hlist_node child_node; 85 struct hlist_head clks; 86 unsigned int notifier_count; 87 #ifdef CONFIG_DEBUG_FS 88 struct dentry *dentry; 89 struct hlist_node debug_node; 90 #endif 91 struct kref ref; 92 }; 93 94 #define CREATE_TRACE_POINTS 95 #include <trace/events/clk.h> 96 97 struct clk { 98 struct clk_core *core; 99 struct device *dev; 100 const char *dev_id; 101 const char *con_id; 102 unsigned long min_rate; 103 unsigned long max_rate; 104 unsigned int exclusive_count; 105 struct hlist_node clks_node; 106 }; 107 108 /*** runtime pm ***/ 109 static int clk_pm_runtime_get(struct clk_core *core) 110 { 111 if (!core->rpm_enabled) 112 return 0; 113 114 return pm_runtime_resume_and_get(core->dev); 115 } 116 117 static void clk_pm_runtime_put(struct clk_core *core) 118 { 119 if (!core->rpm_enabled) 120 return; 121 122 pm_runtime_put_sync(core->dev); 123 } 124 125 /*** locking ***/ 126 static void clk_prepare_lock(void) 127 { 128 if (!mutex_trylock(&prepare_lock)) { 129 if (prepare_owner == current) { 130 prepare_refcnt++; 131 return; 132 } 133 mutex_lock(&prepare_lock); 134 } 135 WARN_ON_ONCE(prepare_owner != NULL); 136 WARN_ON_ONCE(prepare_refcnt != 0); 137 prepare_owner = current; 138 prepare_refcnt = 1; 139 } 140 141 static void clk_prepare_unlock(void) 142 { 143 WARN_ON_ONCE(prepare_owner != current); 144 WARN_ON_ONCE(prepare_refcnt == 0); 145 146 if (--prepare_refcnt) 147 return; 148 prepare_owner = NULL; 
149 mutex_unlock(&prepare_lock); 150 } 151 152 static unsigned long clk_enable_lock(void) 153 __acquires(enable_lock) 154 { 155 unsigned long flags; 156 157 /* 158 * On UP systems, spin_trylock_irqsave() always returns true, even if 159 * we already hold the lock. So, in that case, we rely only on 160 * reference counting. 161 */ 162 if (!IS_ENABLED(CONFIG_SMP) || 163 !spin_trylock_irqsave(&enable_lock, flags)) { 164 if (enable_owner == current) { 165 enable_refcnt++; 166 __acquire(enable_lock); 167 if (!IS_ENABLED(CONFIG_SMP)) 168 local_save_flags(flags); 169 return flags; 170 } 171 spin_lock_irqsave(&enable_lock, flags); 172 } 173 WARN_ON_ONCE(enable_owner != NULL); 174 WARN_ON_ONCE(enable_refcnt != 0); 175 enable_owner = current; 176 enable_refcnt = 1; 177 return flags; 178 } 179 180 static void clk_enable_unlock(unsigned long flags) 181 __releases(enable_lock) 182 { 183 WARN_ON_ONCE(enable_owner != current); 184 WARN_ON_ONCE(enable_refcnt == 0); 185 186 if (--enable_refcnt) { 187 __release(enable_lock); 188 return; 189 } 190 enable_owner = NULL; 191 spin_unlock_irqrestore(&enable_lock, flags); 192 } 193 194 static bool clk_core_rate_is_protected(struct clk_core *core) 195 { 196 return core->protect_count; 197 } 198 199 static bool clk_core_is_prepared(struct clk_core *core) 200 { 201 bool ret = false; 202 203 /* 204 * .is_prepared is optional for clocks that can prepare 205 * fall back to software usage counter if it is missing 206 */ 207 if (!core->ops->is_prepared) 208 return core->prepare_count; 209 210 if (!clk_pm_runtime_get(core)) { 211 ret = core->ops->is_prepared(core->hw); 212 clk_pm_runtime_put(core); 213 } 214 215 return ret; 216 } 217 218 static bool clk_core_is_enabled(struct clk_core *core) 219 { 220 bool ret = false; 221 222 /* 223 * .is_enabled is only mandatory for clocks that gate 224 * fall back to software usage counter if .is_enabled is missing 225 */ 226 if (!core->ops->is_enabled) 227 return core->enable_count; 228 229 /* 230 * Check if clock controller's device is runtime active before 231 * calling .is_enabled callback. If not, assume that clock is 232 * disabled, because we might be called from atomic context, from 233 * which pm_runtime_get() is not allowed. 234 * This function is called mainly from clk_disable_unused_subtree, 235 * which ensures proper runtime pm activation of controller before 236 * taking enable spinlock, but the below check is needed if one tries 237 * to call it from other places. 238 */ 239 if (core->rpm_enabled) { 240 pm_runtime_get_noresume(core->dev); 241 if (!pm_runtime_active(core->dev)) { 242 ret = false; 243 goto done; 244 } 245 } 246 247 ret = core->ops->is_enabled(core->hw); 248 done: 249 if (core->rpm_enabled) 250 pm_runtime_put(core->dev); 251 252 return ret; 253 } 254 255 /*** helper functions ***/ 256 257 const char *__clk_get_name(const struct clk *clk) 258 { 259 return !clk ? NULL : clk->core->name; 260 } 261 EXPORT_SYMBOL_GPL(__clk_get_name); 262 263 const char *clk_hw_get_name(const struct clk_hw *hw) 264 { 265 return hw->core->name; 266 } 267 EXPORT_SYMBOL_GPL(clk_hw_get_name); 268 269 struct clk_hw *__clk_get_hw(struct clk *clk) 270 { 271 return !clk ? NULL : clk->core->hw; 272 } 273 EXPORT_SYMBOL_GPL(__clk_get_hw); 274 275 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) 276 { 277 return hw->core->num_parents; 278 } 279 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); 280 281 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) 282 { 283 return hw->core->parent ? 
hw->core->parent->hw : NULL; 284 } 285 EXPORT_SYMBOL_GPL(clk_hw_get_parent); 286 287 static struct clk_core *__clk_lookup_subtree(const char *name, 288 struct clk_core *core) 289 { 290 struct clk_core *child; 291 struct clk_core *ret; 292 293 if (!strcmp(core->name, name)) 294 return core; 295 296 hlist_for_each_entry(child, &core->children, child_node) { 297 ret = __clk_lookup_subtree(name, child); 298 if (ret) 299 return ret; 300 } 301 302 return NULL; 303 } 304 305 static struct clk_core *clk_core_lookup(const char *name) 306 { 307 struct clk_core *root_clk; 308 struct clk_core *ret; 309 310 if (!name) 311 return NULL; 312 313 /* search the 'proper' clk tree first */ 314 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 315 ret = __clk_lookup_subtree(name, root_clk); 316 if (ret) 317 return ret; 318 } 319 320 /* if not found, then search the orphan tree */ 321 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 322 ret = __clk_lookup_subtree(name, root_clk); 323 if (ret) 324 return ret; 325 } 326 327 return NULL; 328 } 329 330 #ifdef CONFIG_OF 331 static int of_parse_clkspec(const struct device_node *np, int index, 332 const char *name, struct of_phandle_args *out_args); 333 static struct clk_hw * 334 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec); 335 #else 336 static inline int of_parse_clkspec(const struct device_node *np, int index, 337 const char *name, 338 struct of_phandle_args *out_args) 339 { 340 return -ENOENT; 341 } 342 static inline struct clk_hw * 343 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 344 { 345 return ERR_PTR(-ENOENT); 346 } 347 #endif 348 349 /** 350 * clk_core_get - Find the clk_core parent of a clk 351 * @core: clk to find parent of 352 * @p_index: parent index to search for 353 * 354 * This is the preferred method for clk providers to find the parent of a 355 * clk when that parent is external to the clk controller. The parent_names 356 * array is indexed and treated as a local name matching a string in the device 357 * node's 'clock-names' property or as the 'con_id' matching the device's 358 * dev_name() in a clk_lookup. This allows clk providers to use their own 359 * namespace instead of looking for a globally unique parent string. 360 * 361 * For example the following DT snippet would allow a clock registered by the 362 * clock-controller@c001 that has a clk_init_data::parent_data array 363 * with 'xtal' in the 'name' member to find the clock provided by the 364 * clock-controller@f00abcd without needing to get the globally unique name of 365 * the xtal clk. 366 * 367 * parent: clock-controller@f00abcd { 368 * reg = <0xf00abcd 0xabcd>; 369 * #clock-cells = <0>; 370 * }; 371 * 372 * clock-controller@c001 { 373 * reg = <0xc001 0xf00d>; 374 * clocks = <&parent>; 375 * clock-names = "xtal"; 376 * #clock-cells = <1>; 377 * }; 378 * 379 * Returns: -ENOENT when the provider can't be found or the clk doesn't 380 * exist in the provider or the name can't be found in the DT node or 381 * in a clkdev lookup. NULL when the provider knows about the clk but it 382 * isn't provided on this system. 383 * A valid clk_core pointer when the clk can be found in the provider. 384 */ 385 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 386 { 387 const char *name = core->parents[p_index].fw_name; 388 int index = core->parents[p_index].index; 389 struct clk_hw *hw = ERR_PTR(-ENOENT); 390 struct device *dev = core->dev; 391 const char *dev_id = dev ? 
dev_name(dev) : NULL; 392 struct device_node *np = core->of_node; 393 struct of_phandle_args clkspec; 394 395 if (np && (name || index >= 0) && 396 !of_parse_clkspec(np, index, name, &clkspec)) { 397 hw = of_clk_get_hw_from_clkspec(&clkspec); 398 of_node_put(clkspec.np); 399 } else if (name) { 400 /* 401 * If the DT search above couldn't find the provider fallback to 402 * looking up via clkdev based clk_lookups. 403 */ 404 hw = clk_find_hw(dev_id, name); 405 } 406 407 if (IS_ERR(hw)) 408 return ERR_CAST(hw); 409 410 return hw->core; 411 } 412 413 static void clk_core_fill_parent_index(struct clk_core *core, u8 index) 414 { 415 struct clk_parent_map *entry = &core->parents[index]; 416 struct clk_core *parent; 417 418 if (entry->hw) { 419 parent = entry->hw->core; 420 } else { 421 parent = clk_core_get(core, index); 422 if (PTR_ERR(parent) == -ENOENT && entry->name) 423 parent = clk_core_lookup(entry->name); 424 } 425 426 /* 427 * We have a direct reference but it isn't registered yet? 428 * Orphan it and let clk_reparent() update the orphan status 429 * when the parent is registered. 430 */ 431 if (!parent) 432 parent = ERR_PTR(-EPROBE_DEFER); 433 434 /* Only cache it if it's not an error */ 435 if (!IS_ERR(parent)) 436 entry->core = parent; 437 } 438 439 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, 440 u8 index) 441 { 442 if (!core || index >= core->num_parents || !core->parents) 443 return NULL; 444 445 if (!core->parents[index].core) 446 clk_core_fill_parent_index(core, index); 447 448 return core->parents[index].core; 449 } 450 451 struct clk_hw * 452 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) 453 { 454 struct clk_core *parent; 455 456 parent = clk_core_get_parent_by_index(hw->core, index); 457 458 return !parent ? NULL : parent->hw; 459 } 460 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); 461 462 unsigned int __clk_get_enable_count(struct clk *clk) 463 { 464 return !clk ? 0 : clk->core->enable_count; 465 } 466 467 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) 468 { 469 if (!core) 470 return 0; 471 472 if (!core->num_parents || core->parent) 473 return core->rate; 474 475 /* 476 * Clk must have a parent because num_parents > 0 but the parent isn't 477 * known yet. Best to return 0 as the rate of this clk until we can 478 * properly recalc the rate based on the parent's rate. 
479 */ 480 return 0; 481 } 482 483 unsigned long clk_hw_get_rate(const struct clk_hw *hw) 484 { 485 return clk_core_get_rate_nolock(hw->core); 486 } 487 EXPORT_SYMBOL_GPL(clk_hw_get_rate); 488 489 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) 490 { 491 if (!core) 492 return 0; 493 494 return core->accuracy; 495 } 496 497 unsigned long clk_hw_get_flags(const struct clk_hw *hw) 498 { 499 return hw->core->flags; 500 } 501 EXPORT_SYMBOL_GPL(clk_hw_get_flags); 502 503 bool clk_hw_is_prepared(const struct clk_hw *hw) 504 { 505 return clk_core_is_prepared(hw->core); 506 } 507 EXPORT_SYMBOL_GPL(clk_hw_is_prepared); 508 509 bool clk_hw_rate_is_protected(const struct clk_hw *hw) 510 { 511 return clk_core_rate_is_protected(hw->core); 512 } 513 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); 514 515 bool clk_hw_is_enabled(const struct clk_hw *hw) 516 { 517 return clk_core_is_enabled(hw->core); 518 } 519 EXPORT_SYMBOL_GPL(clk_hw_is_enabled); 520 521 bool __clk_is_enabled(struct clk *clk) 522 { 523 if (!clk) 524 return false; 525 526 return clk_core_is_enabled(clk->core); 527 } 528 EXPORT_SYMBOL_GPL(__clk_is_enabled); 529 530 static bool mux_is_better_rate(unsigned long rate, unsigned long now, 531 unsigned long best, unsigned long flags) 532 { 533 if (flags & CLK_MUX_ROUND_CLOSEST) 534 return abs(now - rate) < abs(best - rate); 535 536 return now <= rate && now > best; 537 } 538 539 static void clk_core_init_rate_req(struct clk_core * const core, 540 struct clk_rate_request *req, 541 unsigned long rate); 542 543 static int clk_core_round_rate_nolock(struct clk_core *core, 544 struct clk_rate_request *req); 545 546 static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent) 547 { 548 struct clk_core *tmp; 549 unsigned int i; 550 551 /* Optimize for the case where the parent is already the parent. 
*/ 552 if (core->parent == parent) 553 return true; 554 555 for (i = 0; i < core->num_parents; i++) { 556 tmp = clk_core_get_parent_by_index(core, i); 557 if (!tmp) 558 continue; 559 560 if (tmp == parent) 561 return true; 562 } 563 564 return false; 565 } 566 567 static void 568 clk_core_forward_rate_req(struct clk_core *core, 569 const struct clk_rate_request *old_req, 570 struct clk_core *parent, 571 struct clk_rate_request *req, 572 unsigned long parent_rate) 573 { 574 if (WARN_ON(!clk_core_has_parent(core, parent))) 575 return; 576 577 clk_core_init_rate_req(parent, req, parent_rate); 578 579 if (req->min_rate < old_req->min_rate) 580 req->min_rate = old_req->min_rate; 581 582 if (req->max_rate > old_req->max_rate) 583 req->max_rate = old_req->max_rate; 584 } 585 586 int clk_mux_determine_rate_flags(struct clk_hw *hw, 587 struct clk_rate_request *req, 588 unsigned long flags) 589 { 590 struct clk_core *core = hw->core, *parent, *best_parent = NULL; 591 int i, num_parents, ret; 592 unsigned long best = 0; 593 594 /* if NO_REPARENT flag set, pass through to current parent */ 595 if (core->flags & CLK_SET_RATE_NO_REPARENT) { 596 parent = core->parent; 597 if (core->flags & CLK_SET_RATE_PARENT) { 598 struct clk_rate_request parent_req; 599 600 if (!parent) { 601 req->rate = 0; 602 return 0; 603 } 604 605 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); 606 ret = clk_core_round_rate_nolock(parent, &parent_req); 607 if (ret) 608 return ret; 609 610 best = parent_req.rate; 611 } else if (parent) { 612 best = clk_core_get_rate_nolock(parent); 613 } else { 614 best = clk_core_get_rate_nolock(core); 615 } 616 617 goto out; 618 } 619 620 /* find the parent that can provide the fastest rate <= rate */ 621 num_parents = core->num_parents; 622 for (i = 0; i < num_parents; i++) { 623 unsigned long parent_rate; 624 625 parent = clk_core_get_parent_by_index(core, i); 626 if (!parent) 627 continue; 628 629 if (core->flags & CLK_SET_RATE_PARENT) { 630 struct clk_rate_request parent_req; 631 632 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); 633 ret = clk_core_round_rate_nolock(parent, &parent_req); 634 if (ret) 635 continue; 636 637 parent_rate = parent_req.rate; 638 } else { 639 parent_rate = clk_core_get_rate_nolock(parent); 640 } 641 642 if (mux_is_better_rate(req->rate, parent_rate, 643 best, flags)) { 644 best_parent = parent; 645 best = parent_rate; 646 } 647 } 648 649 if (!best_parent) 650 return -EINVAL; 651 652 out: 653 if (best_parent) 654 req->best_parent_hw = best_parent->hw; 655 req->best_parent_rate = best; 656 req->rate = best; 657 658 return 0; 659 } 660 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); 661 662 struct clk *__clk_lookup(const char *name) 663 { 664 struct clk_core *core = clk_core_lookup(name); 665 666 return !core ? 
NULL : core->hw->clk; 667 } 668 669 static void clk_core_get_boundaries(struct clk_core *core, 670 unsigned long *min_rate, 671 unsigned long *max_rate) 672 { 673 struct clk *clk_user; 674 675 lockdep_assert_held(&prepare_lock); 676 677 *min_rate = core->min_rate; 678 *max_rate = core->max_rate; 679 680 hlist_for_each_entry(clk_user, &core->clks, clks_node) 681 *min_rate = max(*min_rate, clk_user->min_rate); 682 683 hlist_for_each_entry(clk_user, &core->clks, clks_node) 684 *max_rate = min(*max_rate, clk_user->max_rate); 685 } 686 687 /* 688 * clk_hw_get_rate_range() - returns the clock rate range for a hw clk 689 * @hw: the hw clk we want to get the range from 690 * @min_rate: pointer to the variable that will hold the minimum 691 * @max_rate: pointer to the variable that will hold the maximum 692 * 693 * Fills the @min_rate and @max_rate variables with the minimum and 694 * maximum that clock can reach. 695 */ 696 void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate, 697 unsigned long *max_rate) 698 { 699 clk_core_get_boundaries(hw->core, min_rate, max_rate); 700 } 701 EXPORT_SYMBOL_GPL(clk_hw_get_rate_range); 702 703 static bool clk_core_check_boundaries(struct clk_core *core, 704 unsigned long min_rate, 705 unsigned long max_rate) 706 { 707 struct clk *user; 708 709 lockdep_assert_held(&prepare_lock); 710 711 if (min_rate > core->max_rate || max_rate < core->min_rate) 712 return false; 713 714 hlist_for_each_entry(user, &core->clks, clks_node) 715 if (min_rate > user->max_rate || max_rate < user->min_rate) 716 return false; 717 718 return true; 719 } 720 721 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, 722 unsigned long max_rate) 723 { 724 hw->core->min_rate = min_rate; 725 hw->core->max_rate = max_rate; 726 } 727 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); 728 729 /* 730 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk 731 * @hw: mux type clk to determine rate on 732 * @req: rate request, also used to return preferred parent and frequencies 733 * 734 * Helper for finding best parent to provide a given frequency. This can be used 735 * directly as a determine_rate callback (e.g. for a mux), or from a more 736 * complex clock that may combine a mux with other operations. 
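 *
 * A hedged provider-side sketch (hypothetical ops, not from this file) wiring
 * the helper up directly as a mux's determine_rate callback:
 *
 *   static const struct clk_ops my_mux_ops = {
 *           .get_parent     = my_mux_get_parent,
 *           .set_parent     = my_mux_set_parent,
 *           .determine_rate = __clk_mux_determine_rate,
 *   };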
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
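 *
 * A hedged consumer sketch (hypothetical, not from this file) of a balanced
 * exclusive section around a rate change:
 *
 *   ret = clk_rate_exclusive_get(clk);
 *   if (ret)
 *           return ret;
 *
 *   ret = clk_set_rate(clk, target_rate);
 *
 *   clk_rate_exclusive_put(clk);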
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
	clk_pm_runtime_put(core);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.
One example is a clk which is accessed over 937 * I2c. In the complex case a clk gate operation may require a fast and a slow 938 * part. It is this reason that clk_unprepare and clk_disable are not mutually 939 * exclusive. In fact clk_disable must be called before clk_unprepare. 940 */ 941 void clk_unprepare(struct clk *clk) 942 { 943 if (IS_ERR_OR_NULL(clk)) 944 return; 945 946 clk_core_unprepare_lock(clk->core); 947 } 948 EXPORT_SYMBOL_GPL(clk_unprepare); 949 950 static int clk_core_prepare(struct clk_core *core) 951 { 952 int ret = 0; 953 954 lockdep_assert_held(&prepare_lock); 955 956 if (!core) 957 return 0; 958 959 if (core->prepare_count == 0) { 960 ret = clk_pm_runtime_get(core); 961 if (ret) 962 return ret; 963 964 ret = clk_core_prepare(core->parent); 965 if (ret) 966 goto runtime_put; 967 968 trace_clk_prepare(core); 969 970 if (core->ops->prepare) 971 ret = core->ops->prepare(core->hw); 972 973 trace_clk_prepare_complete(core); 974 975 if (ret) 976 goto unprepare; 977 } 978 979 core->prepare_count++; 980 981 /* 982 * CLK_SET_RATE_GATE is a special case of clock protection 983 * Instead of a consumer claiming exclusive rate control, it is 984 * actually the provider which prevents any consumer from making any 985 * operation which could result in a rate change or rate glitch while 986 * the clock is prepared. 987 */ 988 if (core->flags & CLK_SET_RATE_GATE) 989 clk_core_rate_protect(core); 990 991 return 0; 992 unprepare: 993 clk_core_unprepare(core->parent); 994 runtime_put: 995 clk_pm_runtime_put(core); 996 return ret; 997 } 998 999 static int clk_core_prepare_lock(struct clk_core *core) 1000 { 1001 int ret; 1002 1003 clk_prepare_lock(); 1004 ret = clk_core_prepare(core); 1005 clk_prepare_unlock(); 1006 1007 return ret; 1008 } 1009 1010 /** 1011 * clk_prepare - prepare a clock source 1012 * @clk: the clk being prepared 1013 * 1014 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple 1015 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the 1016 * operation may sleep. One example is a clk which is accessed over I2c. In 1017 * the complex case a clk ungate operation may require a fast and a slow part. 1018 * It is this reason that clk_prepare and clk_enable are not mutually 1019 * exclusive. In fact clk_prepare must be called before clk_enable. 1020 * Returns 0 on success, -EERROR otherwise. 
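 *
 * A hedged consumer bring-up sketch (hypothetical, not from this file)
 * showing the required ordering:
 *
 *   ret = clk_prepare(clk);        // may sleep
 *   if (ret)
 *           return ret;
 *
 *   ret = clk_enable(clk);         // must not sleep
 *   if (ret) {
 *           clk_unprepare(clk);
 *           return ret;
 *   }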
1021 */ 1022 int clk_prepare(struct clk *clk) 1023 { 1024 if (!clk) 1025 return 0; 1026 1027 return clk_core_prepare_lock(clk->core); 1028 } 1029 EXPORT_SYMBOL_GPL(clk_prepare); 1030 1031 static void clk_core_disable(struct clk_core *core) 1032 { 1033 lockdep_assert_held(&enable_lock); 1034 1035 if (!core) 1036 return; 1037 1038 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) 1039 return; 1040 1041 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, 1042 "Disabling critical %s\n", core->name)) 1043 return; 1044 1045 if (--core->enable_count > 0) 1046 return; 1047 1048 trace_clk_disable_rcuidle(core); 1049 1050 if (core->ops->disable) 1051 core->ops->disable(core->hw); 1052 1053 trace_clk_disable_complete_rcuidle(core); 1054 1055 clk_core_disable(core->parent); 1056 } 1057 1058 static void clk_core_disable_lock(struct clk_core *core) 1059 { 1060 unsigned long flags; 1061 1062 flags = clk_enable_lock(); 1063 clk_core_disable(core); 1064 clk_enable_unlock(flags); 1065 } 1066 1067 /** 1068 * clk_disable - gate a clock 1069 * @clk: the clk being gated 1070 * 1071 * clk_disable must not sleep, which differentiates it from clk_unprepare. In 1072 * a simple case, clk_disable can be used instead of clk_unprepare to gate a 1073 * clk if the operation is fast and will never sleep. One example is a 1074 * SoC-internal clk which is controlled via simple register writes. In the 1075 * complex case a clk gate operation may require a fast and a slow part. It is 1076 * this reason that clk_unprepare and clk_disable are not mutually exclusive. 1077 * In fact clk_disable must be called before clk_unprepare. 1078 */ 1079 void clk_disable(struct clk *clk) 1080 { 1081 if (IS_ERR_OR_NULL(clk)) 1082 return; 1083 1084 clk_core_disable_lock(clk->core); 1085 } 1086 EXPORT_SYMBOL_GPL(clk_disable); 1087 1088 static int clk_core_enable(struct clk_core *core) 1089 { 1090 int ret = 0; 1091 1092 lockdep_assert_held(&enable_lock); 1093 1094 if (!core) 1095 return 0; 1096 1097 if (WARN(core->prepare_count == 0, 1098 "Enabling unprepared %s\n", core->name)) 1099 return -ESHUTDOWN; 1100 1101 if (core->enable_count == 0) { 1102 ret = clk_core_enable(core->parent); 1103 1104 if (ret) 1105 return ret; 1106 1107 trace_clk_enable_rcuidle(core); 1108 1109 if (core->ops->enable) 1110 ret = core->ops->enable(core->hw); 1111 1112 trace_clk_enable_complete_rcuidle(core); 1113 1114 if (ret) { 1115 clk_core_disable(core->parent); 1116 return ret; 1117 } 1118 } 1119 1120 core->enable_count++; 1121 return 0; 1122 } 1123 1124 static int clk_core_enable_lock(struct clk_core *core) 1125 { 1126 unsigned long flags; 1127 int ret; 1128 1129 flags = clk_enable_lock(); 1130 ret = clk_core_enable(core); 1131 clk_enable_unlock(flags); 1132 1133 return ret; 1134 } 1135 1136 /** 1137 * clk_gate_restore_context - restore context for poweroff 1138 * @hw: the clk_hw pointer of clock whose state is to be restored 1139 * 1140 * The clock gate restore context function enables or disables 1141 * the gate clocks based on the enable_count. This is done in cases 1142 * where the clock context is lost and based on the enable_count 1143 * the clock either needs to be enabled/disabled. This 1144 * helps restore the state of gate clocks. 
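 *
 * A hedged provider sketch (hypothetical ops, not from this file) wiring the
 * helper up as the restore hook of a gate clock:
 *
 *   static const struct clk_ops my_gate_ops = {
 *           .enable          = my_gate_enable,
 *           .disable         = my_gate_disable,
 *           .restore_context = clk_gate_restore_context,
 *   };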
1145 */ 1146 void clk_gate_restore_context(struct clk_hw *hw) 1147 { 1148 struct clk_core *core = hw->core; 1149 1150 if (core->enable_count) 1151 core->ops->enable(hw); 1152 else 1153 core->ops->disable(hw); 1154 } 1155 EXPORT_SYMBOL_GPL(clk_gate_restore_context); 1156 1157 static int clk_core_save_context(struct clk_core *core) 1158 { 1159 struct clk_core *child; 1160 int ret = 0; 1161 1162 hlist_for_each_entry(child, &core->children, child_node) { 1163 ret = clk_core_save_context(child); 1164 if (ret < 0) 1165 return ret; 1166 } 1167 1168 if (core->ops && core->ops->save_context) 1169 ret = core->ops->save_context(core->hw); 1170 1171 return ret; 1172 } 1173 1174 static void clk_core_restore_context(struct clk_core *core) 1175 { 1176 struct clk_core *child; 1177 1178 if (core->ops && core->ops->restore_context) 1179 core->ops->restore_context(core->hw); 1180 1181 hlist_for_each_entry(child, &core->children, child_node) 1182 clk_core_restore_context(child); 1183 } 1184 1185 /** 1186 * clk_save_context - save clock context for poweroff 1187 * 1188 * Saves the context of the clock register for powerstates in which the 1189 * contents of the registers will be lost. Occurs deep within the suspend 1190 * code. Returns 0 on success. 1191 */ 1192 int clk_save_context(void) 1193 { 1194 struct clk_core *clk; 1195 int ret; 1196 1197 hlist_for_each_entry(clk, &clk_root_list, child_node) { 1198 ret = clk_core_save_context(clk); 1199 if (ret < 0) 1200 return ret; 1201 } 1202 1203 hlist_for_each_entry(clk, &clk_orphan_list, child_node) { 1204 ret = clk_core_save_context(clk); 1205 if (ret < 0) 1206 return ret; 1207 } 1208 1209 return 0; 1210 } 1211 EXPORT_SYMBOL_GPL(clk_save_context); 1212 1213 /** 1214 * clk_restore_context - restore clock context after poweroff 1215 * 1216 * Restore the saved clock context upon resume. 1217 * 1218 */ 1219 void clk_restore_context(void) 1220 { 1221 struct clk_core *core; 1222 1223 hlist_for_each_entry(core, &clk_root_list, child_node) 1224 clk_core_restore_context(core); 1225 1226 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1227 clk_core_restore_context(core); 1228 } 1229 EXPORT_SYMBOL_GPL(clk_restore_context); 1230 1231 /** 1232 * clk_enable - ungate a clock 1233 * @clk: the clk being ungated 1234 * 1235 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 1236 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 1237 * if the operation will never sleep. One example is a SoC-internal clk which 1238 * is controlled via simple register writes. In the complex case a clk ungate 1239 * operation may require a fast and a slow part. It is this reason that 1240 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 1241 * must be called before clk_enable. Returns 0 on success, -EERROR 1242 * otherwise. 1243 */ 1244 int clk_enable(struct clk *clk) 1245 { 1246 if (!clk) 1247 return 0; 1248 1249 return clk_core_enable_lock(clk->core); 1250 } 1251 EXPORT_SYMBOL_GPL(clk_enable); 1252 1253 /** 1254 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. 1255 * @clk: clock source 1256 * 1257 * Returns true if clk_prepare() implicitly enables the clock, effectively 1258 * making clk_enable()/clk_disable() no-ops, false otherwise. 1259 * 1260 * This is of interest mainly to power management code where actually 1261 * disabling the clock also requires unpreparing it to have any material 1262 * effect. 
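 *
 * A hedged caller sketch (hypothetical power management code, not from this
 * file), deciding whether a sleepable unprepare step is needed to really gate
 * the clock:
 *
 *   if (clk_is_enabled_when_prepared(clk))
 *           need_unprepare = true;    // clk_disable() alone will not gate it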
1263 * 1264 * Regardless of the value returned here, the caller must always invoke 1265 * clk_enable() or clk_prepare_enable() and counterparts for usage counts 1266 * to be right. 1267 */ 1268 bool clk_is_enabled_when_prepared(struct clk *clk) 1269 { 1270 return clk && !(clk->core->ops->enable && clk->core->ops->disable); 1271 } 1272 EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared); 1273 1274 static int clk_core_prepare_enable(struct clk_core *core) 1275 { 1276 int ret; 1277 1278 ret = clk_core_prepare_lock(core); 1279 if (ret) 1280 return ret; 1281 1282 ret = clk_core_enable_lock(core); 1283 if (ret) 1284 clk_core_unprepare_lock(core); 1285 1286 return ret; 1287 } 1288 1289 static void clk_core_disable_unprepare(struct clk_core *core) 1290 { 1291 clk_core_disable_lock(core); 1292 clk_core_unprepare_lock(core); 1293 } 1294 1295 static void __init clk_unprepare_unused_subtree(struct clk_core *core) 1296 { 1297 struct clk_core *child; 1298 1299 lockdep_assert_held(&prepare_lock); 1300 1301 hlist_for_each_entry(child, &core->children, child_node) 1302 clk_unprepare_unused_subtree(child); 1303 1304 if (core->prepare_count) 1305 return; 1306 1307 if (core->flags & CLK_IGNORE_UNUSED) 1308 return; 1309 1310 if (clk_pm_runtime_get(core)) 1311 return; 1312 1313 if (clk_core_is_prepared(core)) { 1314 trace_clk_unprepare(core); 1315 if (core->ops->unprepare_unused) 1316 core->ops->unprepare_unused(core->hw); 1317 else if (core->ops->unprepare) 1318 core->ops->unprepare(core->hw); 1319 trace_clk_unprepare_complete(core); 1320 } 1321 1322 clk_pm_runtime_put(core); 1323 } 1324 1325 static void __init clk_disable_unused_subtree(struct clk_core *core) 1326 { 1327 struct clk_core *child; 1328 unsigned long flags; 1329 1330 lockdep_assert_held(&prepare_lock); 1331 1332 hlist_for_each_entry(child, &core->children, child_node) 1333 clk_disable_unused_subtree(child); 1334 1335 if (core->flags & CLK_OPS_PARENT_ENABLE) 1336 clk_core_prepare_enable(core->parent); 1337 1338 if (clk_pm_runtime_get(core)) 1339 goto unprepare_out; 1340 1341 flags = clk_enable_lock(); 1342 1343 if (core->enable_count) 1344 goto unlock_out; 1345 1346 if (core->flags & CLK_IGNORE_UNUSED) 1347 goto unlock_out; 1348 1349 /* 1350 * some gate clocks have special needs during the disable-unused 1351 * sequence. 
call .disable_unused if available, otherwise fall 1352 * back to .disable 1353 */ 1354 if (clk_core_is_enabled(core)) { 1355 trace_clk_disable(core); 1356 if (core->ops->disable_unused) 1357 core->ops->disable_unused(core->hw); 1358 else if (core->ops->disable) 1359 core->ops->disable(core->hw); 1360 trace_clk_disable_complete(core); 1361 } 1362 1363 unlock_out: 1364 clk_enable_unlock(flags); 1365 clk_pm_runtime_put(core); 1366 unprepare_out: 1367 if (core->flags & CLK_OPS_PARENT_ENABLE) 1368 clk_core_disable_unprepare(core->parent); 1369 } 1370 1371 static bool clk_ignore_unused __initdata; 1372 static int __init clk_ignore_unused_setup(char *__unused) 1373 { 1374 clk_ignore_unused = true; 1375 return 1; 1376 } 1377 __setup("clk_ignore_unused", clk_ignore_unused_setup); 1378 1379 static int __init clk_disable_unused(void) 1380 { 1381 struct clk_core *core; 1382 1383 if (clk_ignore_unused) { 1384 pr_warn("clk: Not disabling unused clocks\n"); 1385 return 0; 1386 } 1387 1388 clk_prepare_lock(); 1389 1390 hlist_for_each_entry(core, &clk_root_list, child_node) 1391 clk_disable_unused_subtree(core); 1392 1393 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1394 clk_disable_unused_subtree(core); 1395 1396 hlist_for_each_entry(core, &clk_root_list, child_node) 1397 clk_unprepare_unused_subtree(core); 1398 1399 hlist_for_each_entry(core, &clk_orphan_list, child_node) 1400 clk_unprepare_unused_subtree(core); 1401 1402 clk_prepare_unlock(); 1403 1404 return 0; 1405 } 1406 late_initcall_sync(clk_disable_unused); 1407 1408 static int clk_core_determine_round_nolock(struct clk_core *core, 1409 struct clk_rate_request *req) 1410 { 1411 long rate; 1412 1413 lockdep_assert_held(&prepare_lock); 1414 1415 if (!core) 1416 return 0; 1417 1418 /* 1419 * Some clock providers hand-craft their clk_rate_requests and 1420 * might not fill min_rate and max_rate. 1421 * 1422 * If it's the case, clamping the rate is equivalent to setting 1423 * the rate to 0 which is bad. Skip the clamping but complain so 1424 * that it gets fixed, hopefully. 
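	 *
	 * A hedged provider-side sketch (hypothetical, not from this file)
	 * of the preferred alternative to hand-crafting a request:
	 *
	 *   struct clk_rate_request req;
	 *
	 *   clk_hw_init_rate_request(hw, &req, rate);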
	 */
	if (!req->min_rate && !req->max_rate)
		pr_warn("%s: %s: clk_rate_request has uninitialized min and max rate.\n",
			__func__, core->name);
	else
		req->rate = clamp(req->rate, req->min_rate, req->max_rate);

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req,
				   unsigned long rate)
{
	struct clk_core *parent;

	if (WARN_ON(!req))
		return;

	memset(req, 0, sizeof(*req));
	req->max_rate = ULONG_MAX;

	if (!core)
		return;

	req->rate = rate;
	clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

/**
 * clk_hw_init_rate_request - Initializes a clk_rate_request
 * @hw: the clk for which we want to submit a rate request
 * @req: the clk_rate_request structure we want to initialise
 * @rate: the rate which is to be requested
 *
 * Initializes a clk_rate_request structure to submit to
 * __clk_determine_rate() or similar functions.
 */
void clk_hw_init_rate_request(const struct clk_hw *hw,
			      struct clk_rate_request *req,
			      unsigned long rate)
{
	if (WARN_ON(!hw || !req))
		return;

	clk_core_init_rate_req(hw->core, req, rate);
}
EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);

/**
 * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
 * @hw: the original clock that got the rate request
 * @old_req: the original clk_rate_request structure we want to forward
 * @parent: the clk we want to forward @old_req to
 * @req: the clk_rate_request structure we want to initialise
 * @parent_rate: The rate which is to be requested to @parent
 *
 * Initializes a clk_rate_request structure to submit to a clock parent
 * in __clk_determine_rate() or similar functions.
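 *
 * A hedged provider-side sketch (hypothetical, not from this file) of a
 * determine_rate implementation forwarding the request to its parent:
 *
 *   struct clk_rate_request parent_req;
 *   int ret;
 *
 *   clk_hw_forward_rate_request(hw, req, parent, &parent_req, req->rate);
 *   ret = __clk_determine_rate(parent, &parent_req);
 *   if (ret)
 *           return ret;
 *
 *   req->best_parent_rate = parent_req.rate;
 *   req->rate = parent_req.rate;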
1514 */ 1515 void clk_hw_forward_rate_request(const struct clk_hw *hw, 1516 const struct clk_rate_request *old_req, 1517 const struct clk_hw *parent, 1518 struct clk_rate_request *req, 1519 unsigned long parent_rate) 1520 { 1521 if (WARN_ON(!hw || !old_req || !parent || !req)) 1522 return; 1523 1524 clk_core_forward_rate_req(hw->core, old_req, 1525 parent->core, req, 1526 parent_rate); 1527 } 1528 1529 static bool clk_core_can_round(struct clk_core * const core) 1530 { 1531 return core->ops->determine_rate || core->ops->round_rate; 1532 } 1533 1534 static int clk_core_round_rate_nolock(struct clk_core *core, 1535 struct clk_rate_request *req) 1536 { 1537 int ret; 1538 1539 lockdep_assert_held(&prepare_lock); 1540 1541 if (!core) { 1542 req->rate = 0; 1543 return 0; 1544 } 1545 1546 if (clk_core_can_round(core)) 1547 return clk_core_determine_round_nolock(core, req); 1548 1549 if (core->flags & CLK_SET_RATE_PARENT) { 1550 struct clk_rate_request parent_req; 1551 1552 clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate); 1553 ret = clk_core_round_rate_nolock(core->parent, &parent_req); 1554 if (ret) 1555 return ret; 1556 1557 req->best_parent_rate = parent_req.rate; 1558 req->rate = parent_req.rate; 1559 1560 return 0; 1561 } 1562 1563 req->rate = core->rate; 1564 return 0; 1565 } 1566 1567 /** 1568 * __clk_determine_rate - get the closest rate actually supported by a clock 1569 * @hw: determine the rate of this clock 1570 * @req: target rate request 1571 * 1572 * Useful for clk_ops such as .set_rate and .determine_rate. 1573 */ 1574 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) 1575 { 1576 if (!hw) { 1577 req->rate = 0; 1578 return 0; 1579 } 1580 1581 return clk_core_round_rate_nolock(hw->core, req); 1582 } 1583 EXPORT_SYMBOL_GPL(__clk_determine_rate); 1584 1585 /** 1586 * clk_hw_round_rate() - round the given rate for a hw clk 1587 * @hw: the hw clk for which we are rounding a rate 1588 * @rate: the rate which is to be rounded 1589 * 1590 * Takes in a rate as input and rounds it to a rate that the clk can actually 1591 * use. 1592 * 1593 * Context: prepare_lock must be held. 1594 * For clk providers to call from within clk_ops such as .round_rate, 1595 * .determine_rate. 1596 * 1597 * Return: returns rounded rate of hw clk if clk supports round_rate operation 1598 * else returns the parent rate. 1599 */ 1600 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) 1601 { 1602 int ret; 1603 struct clk_rate_request req; 1604 1605 clk_core_init_rate_req(hw->core, &req, rate); 1606 1607 ret = clk_core_round_rate_nolock(hw->core, &req); 1608 if (ret) 1609 return 0; 1610 1611 return req.rate; 1612 } 1613 EXPORT_SYMBOL_GPL(clk_hw_round_rate); 1614 1615 /** 1616 * clk_round_rate - round the given rate for a clk 1617 * @clk: the clk for which we are rounding a rate 1618 * @rate: the rate which is to be rounded 1619 * 1620 * Takes in a rate as input and rounds it to a rate that the clk can actually 1621 * use which is then returned. If clk doesn't support round_rate operation 1622 * then the parent rate is returned. 
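 *
 * A hedged consumer sketch (hypothetical, not from this file):
 *
 *   long rounded = clk_round_rate(clk, 48000000);
 *
 *   if (rounded > 0)
 *           ret = clk_set_rate(clk, rounded);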
1623 */ 1624 long clk_round_rate(struct clk *clk, unsigned long rate) 1625 { 1626 struct clk_rate_request req; 1627 int ret; 1628 1629 if (!clk) 1630 return 0; 1631 1632 clk_prepare_lock(); 1633 1634 if (clk->exclusive_count) 1635 clk_core_rate_unprotect(clk->core); 1636 1637 clk_core_init_rate_req(clk->core, &req, rate); 1638 1639 ret = clk_core_round_rate_nolock(clk->core, &req); 1640 1641 if (clk->exclusive_count) 1642 clk_core_rate_protect(clk->core); 1643 1644 clk_prepare_unlock(); 1645 1646 if (ret) 1647 return ret; 1648 1649 return req.rate; 1650 } 1651 EXPORT_SYMBOL_GPL(clk_round_rate); 1652 1653 /** 1654 * __clk_notify - call clk notifier chain 1655 * @core: clk that is changing rate 1656 * @msg: clk notifier type (see include/linux/clk.h) 1657 * @old_rate: old clk rate 1658 * @new_rate: new clk rate 1659 * 1660 * Triggers a notifier call chain on the clk rate-change notification 1661 * for 'clk'. Passes a pointer to the struct clk and the previous 1662 * and current rates to the notifier callback. Intended to be called by 1663 * internal clock code only. Returns NOTIFY_DONE from the last driver 1664 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1665 * a driver returns that. 1666 */ 1667 static int __clk_notify(struct clk_core *core, unsigned long msg, 1668 unsigned long old_rate, unsigned long new_rate) 1669 { 1670 struct clk_notifier *cn; 1671 struct clk_notifier_data cnd; 1672 int ret = NOTIFY_DONE; 1673 1674 cnd.old_rate = old_rate; 1675 cnd.new_rate = new_rate; 1676 1677 list_for_each_entry(cn, &clk_notifier_list, node) { 1678 if (cn->clk->core == core) { 1679 cnd.clk = cn->clk; 1680 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1681 &cnd); 1682 if (ret & NOTIFY_STOP_MASK) 1683 return ret; 1684 } 1685 } 1686 1687 return ret; 1688 } 1689 1690 /** 1691 * __clk_recalc_accuracies 1692 * @core: first clk in the subtree 1693 * 1694 * Walks the subtree of clks starting with clk and recalculates accuracies as 1695 * it goes. Note that if a clk does not implement the .recalc_accuracy 1696 * callback then it is assumed that the clock will take on the accuracy of its 1697 * parent. 1698 */ 1699 static void __clk_recalc_accuracies(struct clk_core *core) 1700 { 1701 unsigned long parent_accuracy = 0; 1702 struct clk_core *child; 1703 1704 lockdep_assert_held(&prepare_lock); 1705 1706 if (core->parent) 1707 parent_accuracy = core->parent->accuracy; 1708 1709 if (core->ops->recalc_accuracy) 1710 core->accuracy = core->ops->recalc_accuracy(core->hw, 1711 parent_accuracy); 1712 else 1713 core->accuracy = parent_accuracy; 1714 1715 hlist_for_each_entry(child, &core->children, child_node) 1716 __clk_recalc_accuracies(child); 1717 } 1718 1719 static long clk_core_get_accuracy_recalc(struct clk_core *core) 1720 { 1721 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) 1722 __clk_recalc_accuracies(core); 1723 1724 return clk_core_get_accuracy_no_lock(core); 1725 } 1726 1727 /** 1728 * clk_get_accuracy - return the accuracy of clk 1729 * @clk: the clk whose accuracy is being returned 1730 * 1731 * Simply returns the cached accuracy of the clk, unless 1732 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1733 * issued. 1734 * If clk is NULL then returns 0. 
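 *
 * A hedged consumer sketch (hypothetical, not from this file); the returned
 * value is expressed in parts per billion (ppb):
 *
 *   long ppb = clk_get_accuracy(clk);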
1735 */ 1736 long clk_get_accuracy(struct clk *clk) 1737 { 1738 long accuracy; 1739 1740 if (!clk) 1741 return 0; 1742 1743 clk_prepare_lock(); 1744 accuracy = clk_core_get_accuracy_recalc(clk->core); 1745 clk_prepare_unlock(); 1746 1747 return accuracy; 1748 } 1749 EXPORT_SYMBOL_GPL(clk_get_accuracy); 1750 1751 static unsigned long clk_recalc(struct clk_core *core, 1752 unsigned long parent_rate) 1753 { 1754 unsigned long rate = parent_rate; 1755 1756 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { 1757 rate = core->ops->recalc_rate(core->hw, parent_rate); 1758 clk_pm_runtime_put(core); 1759 } 1760 return rate; 1761 } 1762 1763 /** 1764 * __clk_recalc_rates 1765 * @core: first clk in the subtree 1766 * @update_req: Whether req_rate should be updated with the new rate 1767 * @msg: notification type (see include/linux/clk.h) 1768 * 1769 * Walks the subtree of clks starting with clk and recalculates rates as it 1770 * goes. Note that if a clk does not implement the .recalc_rate callback then 1771 * it is assumed that the clock will take on the rate of its parent. 1772 * 1773 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1774 * if necessary. 1775 */ 1776 static void __clk_recalc_rates(struct clk_core *core, bool update_req, 1777 unsigned long msg) 1778 { 1779 unsigned long old_rate; 1780 unsigned long parent_rate = 0; 1781 struct clk_core *child; 1782 1783 lockdep_assert_held(&prepare_lock); 1784 1785 old_rate = core->rate; 1786 1787 if (core->parent) 1788 parent_rate = core->parent->rate; 1789 1790 core->rate = clk_recalc(core, parent_rate); 1791 if (update_req) 1792 core->req_rate = core->rate; 1793 1794 /* 1795 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1796 * & ABORT_RATE_CHANGE notifiers 1797 */ 1798 if (core->notifier_count && msg) 1799 __clk_notify(core, msg, old_rate, core->rate); 1800 1801 hlist_for_each_entry(child, &core->children, child_node) 1802 __clk_recalc_rates(child, update_req, msg); 1803 } 1804 1805 static unsigned long clk_core_get_rate_recalc(struct clk_core *core) 1806 { 1807 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) 1808 __clk_recalc_rates(core, false, 0); 1809 1810 return clk_core_get_rate_nolock(core); 1811 } 1812 1813 /** 1814 * clk_get_rate - return the rate of clk 1815 * @clk: the clk whose rate is being returned 1816 * 1817 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1818 * is set, which means a recalc_rate will be issued. Can be called regardless of 1819 * the clock enabledness. If clk is NULL, or if an error occurred, then returns 1820 * 0. 1821 */ 1822 unsigned long clk_get_rate(struct clk *clk) 1823 { 1824 unsigned long rate; 1825 1826 if (!clk) 1827 return 0; 1828 1829 clk_prepare_lock(); 1830 rate = clk_core_get_rate_recalc(clk->core); 1831 clk_prepare_unlock(); 1832 1833 return rate; 1834 } 1835 EXPORT_SYMBOL_GPL(clk_get_rate); 1836 1837 static int clk_fetch_parent_index(struct clk_core *core, 1838 struct clk_core *parent) 1839 { 1840 int i; 1841 1842 if (!parent) 1843 return -EINVAL; 1844 1845 for (i = 0; i < core->num_parents; i++) { 1846 /* Found it first try! */ 1847 if (core->parents[i].core == parent) 1848 return i; 1849 1850 /* Something else is here, so keep looking */ 1851 if (core->parents[i].core) 1852 continue; 1853 1854 /* Maybe core hasn't been cached but the hw is all we know? 
*/ 1855 if (core->parents[i].hw) { 1856 if (core->parents[i].hw == parent->hw) 1857 break; 1858 1859 /* Didn't match, but we're expecting a clk_hw */ 1860 continue; 1861 } 1862 1863 /* Maybe it hasn't been cached (clk_set_parent() path) */ 1864 if (parent == clk_core_get(core, i)) 1865 break; 1866 1867 /* Fallback to comparing globally unique names */ 1868 if (core->parents[i].name && 1869 !strcmp(parent->name, core->parents[i].name)) 1870 break; 1871 } 1872 1873 if (i == core->num_parents) 1874 return -EINVAL; 1875 1876 core->parents[i].core = parent; 1877 return i; 1878 } 1879 1880 /** 1881 * clk_hw_get_parent_index - return the index of the parent clock 1882 * @hw: clk_hw associated with the clk being consumed 1883 * 1884 * Fetches and returns the index of parent clock. Returns -EINVAL if the given 1885 * clock does not have a current parent. 1886 */ 1887 int clk_hw_get_parent_index(struct clk_hw *hw) 1888 { 1889 struct clk_hw *parent = clk_hw_get_parent(hw); 1890 1891 if (WARN_ON(parent == NULL)) 1892 return -EINVAL; 1893 1894 return clk_fetch_parent_index(hw->core, parent->core); 1895 } 1896 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index); 1897 1898 /* 1899 * Update the orphan status of @core and all its children. 1900 */ 1901 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) 1902 { 1903 struct clk_core *child; 1904 1905 core->orphan = is_orphan; 1906 1907 hlist_for_each_entry(child, &core->children, child_node) 1908 clk_core_update_orphan_status(child, is_orphan); 1909 } 1910 1911 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) 1912 { 1913 bool was_orphan = core->orphan; 1914 1915 hlist_del(&core->child_node); 1916 1917 if (new_parent) { 1918 bool becomes_orphan = new_parent->orphan; 1919 1920 /* avoid duplicate POST_RATE_CHANGE notifications */ 1921 if (new_parent->new_child == core) 1922 new_parent->new_child = NULL; 1923 1924 hlist_add_head(&core->child_node, &new_parent->children); 1925 1926 if (was_orphan != becomes_orphan) 1927 clk_core_update_orphan_status(core, becomes_orphan); 1928 } else { 1929 hlist_add_head(&core->child_node, &clk_orphan_list); 1930 if (!was_orphan) 1931 clk_core_update_orphan_status(core, true); 1932 } 1933 1934 core->parent = new_parent; 1935 } 1936 1937 static struct clk_core *__clk_set_parent_before(struct clk_core *core, 1938 struct clk_core *parent) 1939 { 1940 unsigned long flags; 1941 struct clk_core *old_parent = core->parent; 1942 1943 /* 1944 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock 1945 * 1946 * 2. Migrate prepare state between parents and prevent race with 1947 * clk_enable(). 1948 * 1949 * If the clock is not prepared, then a race with 1950 * clk_enable/disable() is impossible since we already have the 1951 * prepare lock (future calls to clk_enable() need to be preceded by 1952 * a clk_prepare()). 1953 * 1954 * If the clock is prepared, migrate the prepared state to the new 1955 * parent and also protect against a race with clk_enable() by 1956 * forcing the clock and the new parent on. This ensures that all 1957 * future calls to clk_enable() are practically NOPs with respect to 1958 * hardware and software states. 1959 * 1960 * See also: Comment for clk_set_parent() below. 
1961 */ 1962 1963 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ 1964 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1965 clk_core_prepare_enable(old_parent); 1966 clk_core_prepare_enable(parent); 1967 } 1968 1969 /* migrate prepare count if > 0 */ 1970 if (core->prepare_count) { 1971 clk_core_prepare_enable(parent); 1972 clk_core_enable_lock(core); 1973 } 1974 1975 /* update the clk tree topology */ 1976 flags = clk_enable_lock(); 1977 clk_reparent(core, parent); 1978 clk_enable_unlock(flags); 1979 1980 return old_parent; 1981 } 1982 1983 static void __clk_set_parent_after(struct clk_core *core, 1984 struct clk_core *parent, 1985 struct clk_core *old_parent) 1986 { 1987 /* 1988 * Finish the migration of prepare state and undo the changes done 1989 * for preventing a race with clk_enable(). 1990 */ 1991 if (core->prepare_count) { 1992 clk_core_disable_lock(core); 1993 clk_core_disable_unprepare(old_parent); 1994 } 1995 1996 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ 1997 if (core->flags & CLK_OPS_PARENT_ENABLE) { 1998 clk_core_disable_unprepare(parent); 1999 clk_core_disable_unprepare(old_parent); 2000 } 2001 } 2002 2003 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, 2004 u8 p_index) 2005 { 2006 unsigned long flags; 2007 int ret = 0; 2008 struct clk_core *old_parent; 2009 2010 old_parent = __clk_set_parent_before(core, parent); 2011 2012 trace_clk_set_parent(core, parent); 2013 2014 /* change clock input source */ 2015 if (parent && core->ops->set_parent) 2016 ret = core->ops->set_parent(core->hw, p_index); 2017 2018 trace_clk_set_parent_complete(core, parent); 2019 2020 if (ret) { 2021 flags = clk_enable_lock(); 2022 clk_reparent(core, old_parent); 2023 clk_enable_unlock(flags); 2024 2025 __clk_set_parent_after(core, old_parent, parent); 2026 2027 return ret; 2028 } 2029 2030 __clk_set_parent_after(core, parent, old_parent); 2031 2032 return 0; 2033 } 2034 2035 /** 2036 * __clk_speculate_rates 2037 * @core: first clk in the subtree 2038 * @parent_rate: the "future" rate of clk's parent 2039 * 2040 * Walks the subtree of clks starting with clk, speculating rates as it 2041 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 2042 * 2043 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 2044 * pre-rate change notifications and returns early if no clks in the 2045 * subtree have subscribed to the notifications. Note that if a clk does not 2046 * implement the .recalc_rate callback then it is assumed that the clock will 2047 * take on the rate of its parent. 
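 *
 * A hedged consumer-side notifier sketch (hypothetical, not from this file)
 * that would veto a speculated rate above some limit:
 *
 *   static int my_clk_notifier_cb(struct notifier_block *nb,
 *                                 unsigned long event, void *data)
 *   {
 *           struct clk_notifier_data *ndata = data;
 *
 *           if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *                   return NOTIFY_BAD;
 *
 *           return NOTIFY_OK;
 *   }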
2048 */ 2049 static int __clk_speculate_rates(struct clk_core *core, 2050 unsigned long parent_rate) 2051 { 2052 struct clk_core *child; 2053 unsigned long new_rate; 2054 int ret = NOTIFY_DONE; 2055 2056 lockdep_assert_held(&prepare_lock); 2057 2058 new_rate = clk_recalc(core, parent_rate); 2059 2060 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 2061 if (core->notifier_count) 2062 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); 2063 2064 if (ret & NOTIFY_STOP_MASK) { 2065 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 2066 __func__, core->name, ret); 2067 goto out; 2068 } 2069 2070 hlist_for_each_entry(child, &core->children, child_node) { 2071 ret = __clk_speculate_rates(child, new_rate); 2072 if (ret & NOTIFY_STOP_MASK) 2073 break; 2074 } 2075 2076 out: 2077 return ret; 2078 } 2079 2080 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, 2081 struct clk_core *new_parent, u8 p_index) 2082 { 2083 struct clk_core *child; 2084 2085 core->new_rate = new_rate; 2086 core->new_parent = new_parent; 2087 core->new_parent_index = p_index; 2088 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 2089 core->new_child = NULL; 2090 if (new_parent && new_parent != core->parent) 2091 new_parent->new_child = core; 2092 2093 hlist_for_each_entry(child, &core->children, child_node) { 2094 child->new_rate = clk_recalc(child, new_rate); 2095 clk_calc_subtree(child, child->new_rate, NULL, 0); 2096 } 2097 } 2098 2099 /* 2100 * calculate the new rates returning the topmost clock that has to be 2101 * changed. 2102 */ 2103 static struct clk_core *clk_calc_new_rates(struct clk_core *core, 2104 unsigned long rate) 2105 { 2106 struct clk_core *top = core; 2107 struct clk_core *old_parent, *parent; 2108 unsigned long best_parent_rate = 0; 2109 unsigned long new_rate; 2110 unsigned long min_rate; 2111 unsigned long max_rate; 2112 int p_index = 0; 2113 long ret; 2114 2115 /* sanity */ 2116 if (IS_ERR_OR_NULL(core)) 2117 return NULL; 2118 2119 /* save parent rate, if it exists */ 2120 parent = old_parent = core->parent; 2121 if (parent) 2122 best_parent_rate = parent->rate; 2123 2124 clk_core_get_boundaries(core, &min_rate, &max_rate); 2125 2126 /* find the closest rate and parent clk/rate */ 2127 if (clk_core_can_round(core)) { 2128 struct clk_rate_request req; 2129 2130 clk_core_init_rate_req(core, &req, rate); 2131 2132 ret = clk_core_determine_round_nolock(core, &req); 2133 if (ret < 0) 2134 return NULL; 2135 2136 best_parent_rate = req.best_parent_rate; 2137 new_rate = req.rate; 2138 parent = req.best_parent_hw ? 
req.best_parent_hw->core : NULL; 2139 2140 if (new_rate < min_rate || new_rate > max_rate) 2141 return NULL; 2142 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { 2143 /* pass-through clock without adjustable parent */ 2144 core->new_rate = core->rate; 2145 return NULL; 2146 } else { 2147 /* pass-through clock with adjustable parent */ 2148 top = clk_calc_new_rates(parent, rate); 2149 new_rate = parent->new_rate; 2150 goto out; 2151 } 2152 2153 /* some clocks must be gated to change parent */ 2154 if (parent != old_parent && 2155 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { 2156 pr_debug("%s: %s not gated but wants to reparent\n", 2157 __func__, core->name); 2158 return NULL; 2159 } 2160 2161 /* try finding the new parent index */ 2162 if (parent && core->num_parents > 1) { 2163 p_index = clk_fetch_parent_index(core, parent); 2164 if (p_index < 0) { 2165 pr_debug("%s: clk %s can not be parent of clk %s\n", 2166 __func__, parent->name, core->name); 2167 return NULL; 2168 } 2169 } 2170 2171 if ((core->flags & CLK_SET_RATE_PARENT) && parent && 2172 best_parent_rate != parent->rate) 2173 top = clk_calc_new_rates(parent, best_parent_rate); 2174 2175 out: 2176 clk_calc_subtree(core, new_rate, parent, p_index); 2177 2178 return top; 2179 } 2180 2181 /* 2182 * Notify about rate changes in a subtree. Always walk down the whole tree 2183 * so that in case of an error we can walk down the whole tree again and 2184 * abort the change. 2185 */ 2186 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, 2187 unsigned long event) 2188 { 2189 struct clk_core *child, *tmp_clk, *fail_clk = NULL; 2190 int ret = NOTIFY_DONE; 2191 2192 if (core->rate == core->new_rate) 2193 return NULL; 2194 2195 if (core->notifier_count) { 2196 ret = __clk_notify(core, event, core->rate, core->new_rate); 2197 if (ret & NOTIFY_STOP_MASK) 2198 fail_clk = core; 2199 } 2200 2201 hlist_for_each_entry(child, &core->children, child_node) { 2202 /* Skip children who will be reparented to another clock */ 2203 if (child->new_parent && child->new_parent != core) 2204 continue; 2205 tmp_clk = clk_propagate_rate_change(child, event); 2206 if (tmp_clk) 2207 fail_clk = tmp_clk; 2208 } 2209 2210 /* handle the new child who might not be in core->children yet */ 2211 if (core->new_child) { 2212 tmp_clk = clk_propagate_rate_change(core->new_child, event); 2213 if (tmp_clk) 2214 fail_clk = tmp_clk; 2215 } 2216 2217 return fail_clk; 2218 } 2219 2220 /* 2221 * walk down a subtree and set the new rates notifying the rate 2222 * change on the way 2223 */ 2224 static void clk_change_rate(struct clk_core *core) 2225 { 2226 struct clk_core *child; 2227 struct hlist_node *tmp; 2228 unsigned long old_rate; 2229 unsigned long best_parent_rate = 0; 2230 bool skip_set_rate = false; 2231 struct clk_core *old_parent; 2232 struct clk_core *parent = NULL; 2233 2234 old_rate = core->rate; 2235 2236 if (core->new_parent) { 2237 parent = core->new_parent; 2238 best_parent_rate = core->new_parent->rate; 2239 } else if (core->parent) { 2240 parent = core->parent; 2241 best_parent_rate = core->parent->rate; 2242 } 2243 2244 if (clk_pm_runtime_get(core)) 2245 return; 2246 2247 if (core->flags & CLK_SET_RATE_UNGATE) { 2248 clk_core_prepare(core); 2249 clk_core_enable_lock(core); 2250 } 2251 2252 if (core->new_parent && core->new_parent != core->parent) { 2253 old_parent = __clk_set_parent_before(core, core->new_parent); 2254 trace_clk_set_parent(core, core->new_parent); 2255 2256 if (core->ops->set_rate_and_parent) { 
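		/*
		 * Providers that implement .set_rate_and_parent can program the
		 * new rate and the new parent in one operation, so the separate
		 * .set_rate call further below is skipped for them.
		 */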
2257 skip_set_rate = true; 2258 core->ops->set_rate_and_parent(core->hw, core->new_rate, 2259 best_parent_rate, 2260 core->new_parent_index); 2261 } else if (core->ops->set_parent) { 2262 core->ops->set_parent(core->hw, core->new_parent_index); 2263 } 2264 2265 trace_clk_set_parent_complete(core, core->new_parent); 2266 __clk_set_parent_after(core, core->new_parent, old_parent); 2267 } 2268 2269 if (core->flags & CLK_OPS_PARENT_ENABLE) 2270 clk_core_prepare_enable(parent); 2271 2272 trace_clk_set_rate(core, core->new_rate); 2273 2274 if (!skip_set_rate && core->ops->set_rate) 2275 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); 2276 2277 trace_clk_set_rate_complete(core, core->new_rate); 2278 2279 core->rate = clk_recalc(core, best_parent_rate); 2280 2281 if (core->flags & CLK_SET_RATE_UNGATE) { 2282 clk_core_disable_lock(core); 2283 clk_core_unprepare(core); 2284 } 2285 2286 if (core->flags & CLK_OPS_PARENT_ENABLE) 2287 clk_core_disable_unprepare(parent); 2288 2289 if (core->notifier_count && old_rate != core->rate) 2290 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); 2291 2292 if (core->flags & CLK_RECALC_NEW_RATES) 2293 (void)clk_calc_new_rates(core, core->new_rate); 2294 2295 /* 2296 * Use safe iteration, as change_rate can actually swap parents 2297 * for certain clock types. 2298 */ 2299 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 2300 /* Skip children who will be reparented to another clock */ 2301 if (child->new_parent && child->new_parent != core) 2302 continue; 2303 clk_change_rate(child); 2304 } 2305 2306 /* handle the new child who might not be in core->children yet */ 2307 if (core->new_child) 2308 clk_change_rate(core->new_child); 2309 2310 clk_pm_runtime_put(core); 2311 } 2312 2313 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 2314 unsigned long req_rate) 2315 { 2316 int ret, cnt; 2317 struct clk_rate_request req; 2318 2319 lockdep_assert_held(&prepare_lock); 2320 2321 if (!core) 2322 return 0; 2323 2324 /* simulate what the rate would be if it could be freely set */ 2325 cnt = clk_core_rate_nuke_protect(core); 2326 if (cnt < 0) 2327 return cnt; 2328 2329 clk_core_init_rate_req(core, &req, req_rate); 2330 2331 ret = clk_core_round_rate_nolock(core, &req); 2332 2333 /* restore the protection */ 2334 clk_core_rate_restore_protect(core, cnt); 2335 2336 return ret ? 
0 : req.rate; 2337 } 2338 2339 static int clk_core_set_rate_nolock(struct clk_core *core, 2340 unsigned long req_rate) 2341 { 2342 struct clk_core *top, *fail_clk; 2343 unsigned long rate; 2344 int ret; 2345 2346 if (!core) 2347 return 0; 2348 2349 rate = clk_core_req_round_rate_nolock(core, req_rate); 2350 2351 /* bail early if nothing to do */ 2352 if (rate == clk_core_get_rate_nolock(core)) 2353 return 0; 2354 2355 /* fail on a direct rate set of a protected provider */ 2356 if (clk_core_rate_is_protected(core)) 2357 return -EBUSY; 2358 2359 /* calculate new rates and get the topmost changed clock */ 2360 top = clk_calc_new_rates(core, req_rate); 2361 if (!top) 2362 return -EINVAL; 2363 2364 ret = clk_pm_runtime_get(core); 2365 if (ret) 2366 return ret; 2367 2368 /* notify that we are about to change rates */ 2369 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 2370 if (fail_clk) { 2371 pr_debug("%s: failed to set %s rate\n", __func__, 2372 fail_clk->name); 2373 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 2374 ret = -EBUSY; 2375 goto err; 2376 } 2377 2378 /* change the rates */ 2379 clk_change_rate(top); 2380 2381 core->req_rate = req_rate; 2382 err: 2383 clk_pm_runtime_put(core); 2384 2385 return ret; 2386 } 2387 2388 /** 2389 * clk_set_rate - specify a new rate for clk 2390 * @clk: the clk whose rate is being changed 2391 * @rate: the new rate for clk 2392 * 2393 * In the simplest case clk_set_rate will only adjust the rate of clk. 2394 * 2395 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 2396 * propagate up to clk's parent; whether or not this happens depends on the 2397 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 2398 * after calling .round_rate then upstream parent propagation is ignored. If 2399 * *parent_rate comes back with a new rate for clk's parent then we propagate 2400 * up to clk's parent and set its rate. Upward propagation will continue 2401 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 2402 * .round_rate stops requesting changes to clk's parent_rate. 2403 * 2404 * Rate changes are accomplished via tree traversal that also recalculates the 2405 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 2406 * 2407 * Returns 0 on success, -EERROR otherwise. 2408 */ 2409 int clk_set_rate(struct clk *clk, unsigned long rate) 2410 { 2411 int ret; 2412 2413 if (!clk) 2414 return 0; 2415 2416 /* prevent racing with updates to the clock topology */ 2417 clk_prepare_lock(); 2418 2419 if (clk->exclusive_count) 2420 clk_core_rate_unprotect(clk->core); 2421 2422 ret = clk_core_set_rate_nolock(clk->core, rate); 2423 2424 if (clk->exclusive_count) 2425 clk_core_rate_protect(clk->core); 2426 2427 clk_prepare_unlock(); 2428 2429 return ret; 2430 } 2431 EXPORT_SYMBOL_GPL(clk_set_rate); 2432 2433 /** 2434 * clk_set_rate_exclusive - specify a new rate and get exclusive control 2435 * @clk: the clk whose rate is being changed 2436 * @rate: the new rate for clk 2437 * 2438 * This is a combination of clk_set_rate() and clk_rate_exclusive_get() 2439 * within a critical section 2440 * 2441 * This can be used initially to ensure that at least 1 consumer is 2442 * satisfied when several consumers are competing for exclusivity over the 2443 * same clock provider. 2444 * 2445 * The exclusivity is not applied if setting the rate failed. 2446 * 2447 * Calls to clk_rate_exclusive_get() should be balanced with calls to 2448 * clk_rate_exclusive_put(). 
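 *
 * A minimal consumer-side sketch; the clk handle in priv and the 148.5 MHz
 * rate are assumptions for illustration, not taken from this file:
 *
 *	ret = clk_set_rate_exclusive(priv->clk, 148500000);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_rate_exclusive_put(priv->clk);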
2449 * 2450 * Returns 0 on success, -EERROR otherwise. 2451 */ 2452 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 2453 { 2454 int ret; 2455 2456 if (!clk) 2457 return 0; 2458 2459 /* prevent racing with updates to the clock topology */ 2460 clk_prepare_lock(); 2461 2462 /* 2463 * The temporary protection removal is not here, on purpose 2464 * This function is meant to be used instead of clk_rate_protect, 2465 * so before the consumer code path protect the clock provider 2466 */ 2467 2468 ret = clk_core_set_rate_nolock(clk->core, rate); 2469 if (!ret) { 2470 clk_core_rate_protect(clk->core); 2471 clk->exclusive_count++; 2472 } 2473 2474 clk_prepare_unlock(); 2475 2476 return ret; 2477 } 2478 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); 2479 2480 static int clk_set_rate_range_nolock(struct clk *clk, 2481 unsigned long min, 2482 unsigned long max) 2483 { 2484 int ret = 0; 2485 unsigned long old_min, old_max, rate; 2486 2487 lockdep_assert_held(&prepare_lock); 2488 2489 if (!clk) 2490 return 0; 2491 2492 trace_clk_set_rate_range(clk->core, min, max); 2493 2494 if (min > max) { 2495 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 2496 __func__, clk->core->name, clk->dev_id, clk->con_id, 2497 min, max); 2498 return -EINVAL; 2499 } 2500 2501 if (clk->exclusive_count) 2502 clk_core_rate_unprotect(clk->core); 2503 2504 /* Save the current values in case we need to rollback the change */ 2505 old_min = clk->min_rate; 2506 old_max = clk->max_rate; 2507 clk->min_rate = min; 2508 clk->max_rate = max; 2509 2510 if (!clk_core_check_boundaries(clk->core, min, max)) { 2511 ret = -EINVAL; 2512 goto out; 2513 } 2514 2515 rate = clk->core->req_rate; 2516 if (clk->core->flags & CLK_GET_RATE_NOCACHE) 2517 rate = clk_core_get_rate_recalc(clk->core); 2518 2519 /* 2520 * Since the boundaries have been changed, let's give the 2521 * opportunity to the provider to adjust the clock rate based on 2522 * the new boundaries. 2523 * 2524 * We also need to handle the case where the clock is currently 2525 * outside of the boundaries. Clamping the last requested rate 2526 * to the current minimum and maximum will also handle this. 2527 * 2528 * FIXME: 2529 * There is a catch. It may fail for the usual reason (clock 2530 * broken, clock protected, etc) but also because: 2531 * - round_rate() was not favorable and fell on the wrong 2532 * side of the boundary 2533 * - the determine_rate() callback does not really check for 2534 * this corner case when determining the rate 2535 */ 2536 rate = clamp(rate, min, max); 2537 ret = clk_core_set_rate_nolock(clk->core, rate); 2538 if (ret) { 2539 /* rollback the changes */ 2540 clk->min_rate = old_min; 2541 clk->max_rate = old_max; 2542 } 2543 2544 out: 2545 if (clk->exclusive_count) 2546 clk_core_rate_protect(clk->core); 2547 2548 return ret; 2549 } 2550 2551 /** 2552 * clk_set_rate_range - set a rate range for a clock source 2553 * @clk: clock source 2554 * @min: desired minimum clock rate in Hz, inclusive 2555 * @max: desired maximum clock rate in Hz, inclusive 2556 * 2557 * Return: 0 for success or negative errno on failure. 
2558 */ 2559 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 2560 { 2561 int ret; 2562 2563 if (!clk) 2564 return 0; 2565 2566 clk_prepare_lock(); 2567 2568 ret = clk_set_rate_range_nolock(clk, min, max); 2569 2570 clk_prepare_unlock(); 2571 2572 return ret; 2573 } 2574 EXPORT_SYMBOL_GPL(clk_set_rate_range); 2575 2576 /** 2577 * clk_set_min_rate - set a minimum clock rate for a clock source 2578 * @clk: clock source 2579 * @rate: desired minimum clock rate in Hz, inclusive 2580 * 2581 * Returns success (0) or negative errno. 2582 */ 2583 int clk_set_min_rate(struct clk *clk, unsigned long rate) 2584 { 2585 if (!clk) 2586 return 0; 2587 2588 trace_clk_set_min_rate(clk->core, rate); 2589 2590 return clk_set_rate_range(clk, rate, clk->max_rate); 2591 } 2592 EXPORT_SYMBOL_GPL(clk_set_min_rate); 2593 2594 /** 2595 * clk_set_max_rate - set a maximum clock rate for a clock source 2596 * @clk: clock source 2597 * @rate: desired maximum clock rate in Hz, inclusive 2598 * 2599 * Returns success (0) or negative errno. 2600 */ 2601 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2602 { 2603 if (!clk) 2604 return 0; 2605 2606 trace_clk_set_max_rate(clk->core, rate); 2607 2608 return clk_set_rate_range(clk, clk->min_rate, rate); 2609 } 2610 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2611 2612 /** 2613 * clk_get_parent - return the parent of a clk 2614 * @clk: the clk whose parent gets returned 2615 * 2616 * Simply returns clk->parent. Returns NULL if clk is NULL. 2617 */ 2618 struct clk *clk_get_parent(struct clk *clk) 2619 { 2620 struct clk *parent; 2621 2622 if (!clk) 2623 return NULL; 2624 2625 clk_prepare_lock(); 2626 /* TODO: Create a per-user clk and change callers to call clk_put */ 2627 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2628 clk_prepare_unlock(); 2629 2630 return parent; 2631 } 2632 EXPORT_SYMBOL_GPL(clk_get_parent); 2633 2634 static struct clk_core *__clk_init_parent(struct clk_core *core) 2635 { 2636 u8 index = 0; 2637 2638 if (core->num_parents > 1 && core->ops->get_parent) 2639 index = core->ops->get_parent(core->hw); 2640 2641 return clk_core_get_parent_by_index(core, index); 2642 } 2643 2644 static void clk_core_reparent(struct clk_core *core, 2645 struct clk_core *new_parent) 2646 { 2647 clk_reparent(core, new_parent); 2648 __clk_recalc_accuracies(core); 2649 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2650 } 2651 2652 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2653 { 2654 if (!hw) 2655 return; 2656 2657 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 2658 } 2659 2660 /** 2661 * clk_has_parent - check if a clock is a possible parent for another 2662 * @clk: clock source 2663 * @parent: parent clock source 2664 * 2665 * This function can be used in drivers that need to check that a clock can be 2666 * the parent of another without actually changing the parent. 2667 * 2668 * Returns true if @parent is a possible parent for @clk, false otherwise. 2669 */ 2670 bool clk_has_parent(const struct clk *clk, const struct clk *parent) 2671 { 2672 /* NULL clocks should be nops, so return success if either is NULL. 
*/ 2673 if (!clk || !parent) 2674 return true; 2675 2676 return clk_core_has_parent(clk->core, parent->core); 2677 } 2678 EXPORT_SYMBOL_GPL(clk_has_parent); 2679 2680 static int clk_core_set_parent_nolock(struct clk_core *core, 2681 struct clk_core *parent) 2682 { 2683 int ret = 0; 2684 int p_index = 0; 2685 unsigned long p_rate = 0; 2686 2687 lockdep_assert_held(&prepare_lock); 2688 2689 if (!core) 2690 return 0; 2691 2692 if (core->parent == parent) 2693 return 0; 2694 2695 /* verify ops for multi-parent clks */ 2696 if (core->num_parents > 1 && !core->ops->set_parent) 2697 return -EPERM; 2698 2699 /* check that we are allowed to re-parent if the clock is in use */ 2700 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2701 return -EBUSY; 2702 2703 if (clk_core_rate_is_protected(core)) 2704 return -EBUSY; 2705 2706 /* try finding the new parent index */ 2707 if (parent) { 2708 p_index = clk_fetch_parent_index(core, parent); 2709 if (p_index < 0) { 2710 pr_debug("%s: clk %s can not be parent of clk %s\n", 2711 __func__, parent->name, core->name); 2712 return p_index; 2713 } 2714 p_rate = parent->rate; 2715 } 2716 2717 ret = clk_pm_runtime_get(core); 2718 if (ret) 2719 return ret; 2720 2721 /* propagate PRE_RATE_CHANGE notifications */ 2722 ret = __clk_speculate_rates(core, p_rate); 2723 2724 /* abort if a driver objects */ 2725 if (ret & NOTIFY_STOP_MASK) 2726 goto runtime_put; 2727 2728 /* do the re-parent */ 2729 ret = __clk_set_parent(core, parent, p_index); 2730 2731 /* propagate rate an accuracy recalculation accordingly */ 2732 if (ret) { 2733 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); 2734 } else { 2735 __clk_recalc_rates(core, true, POST_RATE_CHANGE); 2736 __clk_recalc_accuracies(core); 2737 } 2738 2739 runtime_put: 2740 clk_pm_runtime_put(core); 2741 2742 return ret; 2743 } 2744 2745 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) 2746 { 2747 return clk_core_set_parent_nolock(hw->core, parent->core); 2748 } 2749 EXPORT_SYMBOL_GPL(clk_hw_set_parent); 2750 2751 /** 2752 * clk_set_parent - switch the parent of a mux clk 2753 * @clk: the mux clk whose input we are switching 2754 * @parent: the new input to clk 2755 * 2756 * Re-parent clk to use parent as its new input source. If clk is in 2757 * prepared state, the clk will get enabled for the duration of this call. If 2758 * that's not acceptable for a specific clk (Eg: the consumer can't handle 2759 * that, the reparenting is glitchy in hardware, etc), use the 2760 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2761 * 2762 * After successfully changing clk's parent clk_set_parent will update the 2763 * clk topology, sysfs topology and propagate rate recalculation via 2764 * __clk_recalc_rates. 2765 * 2766 * Returns 0 on success, -EERROR otherwise. 2767 */ 2768 int clk_set_parent(struct clk *clk, struct clk *parent) 2769 { 2770 int ret; 2771 2772 if (!clk) 2773 return 0; 2774 2775 clk_prepare_lock(); 2776 2777 if (clk->exclusive_count) 2778 clk_core_rate_unprotect(clk->core); 2779 2780 ret = clk_core_set_parent_nolock(clk->core, 2781 parent ? 
parent->core : NULL); 2782 2783 if (clk->exclusive_count) 2784 clk_core_rate_protect(clk->core); 2785 2786 clk_prepare_unlock(); 2787 2788 return ret; 2789 } 2790 EXPORT_SYMBOL_GPL(clk_set_parent); 2791 2792 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2793 { 2794 int ret = -EINVAL; 2795 2796 lockdep_assert_held(&prepare_lock); 2797 2798 if (!core) 2799 return 0; 2800 2801 if (clk_core_rate_is_protected(core)) 2802 return -EBUSY; 2803 2804 trace_clk_set_phase(core, degrees); 2805 2806 if (core->ops->set_phase) { 2807 ret = core->ops->set_phase(core->hw, degrees); 2808 if (!ret) 2809 core->phase = degrees; 2810 } 2811 2812 trace_clk_set_phase_complete(core, degrees); 2813 2814 return ret; 2815 } 2816 2817 /** 2818 * clk_set_phase - adjust the phase shift of a clock signal 2819 * @clk: clock signal source 2820 * @degrees: number of degrees the signal is shifted 2821 * 2822 * Shifts the phase of a clock signal by the specified 2823 * degrees. Returns 0 on success, -EERROR otherwise. 2824 * 2825 * This function makes no distinction about the input or reference 2826 * signal that we adjust the clock signal phase against. For example 2827 * phase locked-loop clock signal generators we may shift phase with 2828 * respect to feedback clock signal input, but for other cases the 2829 * clock phase may be shifted with respect to some other, unspecified 2830 * signal. 2831 * 2832 * Additionally the concept of phase shift does not propagate through 2833 * the clock tree hierarchy, which sets it apart from clock rates and 2834 * clock accuracy. A parent clock phase attribute does not have an 2835 * impact on the phase attribute of a child clock. 2836 */ 2837 int clk_set_phase(struct clk *clk, int degrees) 2838 { 2839 int ret; 2840 2841 if (!clk) 2842 return 0; 2843 2844 /* sanity check degrees */ 2845 degrees %= 360; 2846 if (degrees < 0) 2847 degrees += 360; 2848 2849 clk_prepare_lock(); 2850 2851 if (clk->exclusive_count) 2852 clk_core_rate_unprotect(clk->core); 2853 2854 ret = clk_core_set_phase_nolock(clk->core, degrees); 2855 2856 if (clk->exclusive_count) 2857 clk_core_rate_protect(clk->core); 2858 2859 clk_prepare_unlock(); 2860 2861 return ret; 2862 } 2863 EXPORT_SYMBOL_GPL(clk_set_phase); 2864 2865 static int clk_core_get_phase(struct clk_core *core) 2866 { 2867 int ret; 2868 2869 lockdep_assert_held(&prepare_lock); 2870 if (!core->ops->get_phase) 2871 return 0; 2872 2873 /* Always try to update cached phase if possible */ 2874 ret = core->ops->get_phase(core->hw); 2875 if (ret >= 0) 2876 core->phase = ret; 2877 2878 return ret; 2879 } 2880 2881 /** 2882 * clk_get_phase - return the phase shift of a clock signal 2883 * @clk: clock signal source 2884 * 2885 * Returns the phase shift of a clock node in degrees, otherwise returns 2886 * -EERROR. 
2887 */ 2888 int clk_get_phase(struct clk *clk) 2889 { 2890 int ret; 2891 2892 if (!clk) 2893 return 0; 2894 2895 clk_prepare_lock(); 2896 ret = clk_core_get_phase(clk->core); 2897 clk_prepare_unlock(); 2898 2899 return ret; 2900 } 2901 EXPORT_SYMBOL_GPL(clk_get_phase); 2902 2903 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 2904 { 2905 /* Assume a default value of 50% */ 2906 core->duty.num = 1; 2907 core->duty.den = 2; 2908 } 2909 2910 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 2911 2912 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 2913 { 2914 struct clk_duty *duty = &core->duty; 2915 int ret = 0; 2916 2917 if (!core->ops->get_duty_cycle) 2918 return clk_core_update_duty_cycle_parent_nolock(core); 2919 2920 ret = core->ops->get_duty_cycle(core->hw, duty); 2921 if (ret) 2922 goto reset; 2923 2924 /* Don't trust the clock provider too much */ 2925 if (duty->den == 0 || duty->num > duty->den) { 2926 ret = -EINVAL; 2927 goto reset; 2928 } 2929 2930 return 0; 2931 2932 reset: 2933 clk_core_reset_duty_cycle_nolock(core); 2934 return ret; 2935 } 2936 2937 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 2938 { 2939 int ret = 0; 2940 2941 if (core->parent && 2942 core->flags & CLK_DUTY_CYCLE_PARENT) { 2943 ret = clk_core_update_duty_cycle_nolock(core->parent); 2944 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2945 } else { 2946 clk_core_reset_duty_cycle_nolock(core); 2947 } 2948 2949 return ret; 2950 } 2951 2952 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2953 struct clk_duty *duty); 2954 2955 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 2956 struct clk_duty *duty) 2957 { 2958 int ret; 2959 2960 lockdep_assert_held(&prepare_lock); 2961 2962 if (clk_core_rate_is_protected(core)) 2963 return -EBUSY; 2964 2965 trace_clk_set_duty_cycle(core, duty); 2966 2967 if (!core->ops->set_duty_cycle) 2968 return clk_core_set_duty_cycle_parent_nolock(core, duty); 2969 2970 ret = core->ops->set_duty_cycle(core->hw, duty); 2971 if (!ret) 2972 memcpy(&core->duty, duty, sizeof(*duty)); 2973 2974 trace_clk_set_duty_cycle_complete(core, duty); 2975 2976 return ret; 2977 } 2978 2979 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2980 struct clk_duty *duty) 2981 { 2982 int ret = 0; 2983 2984 if (core->parent && 2985 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 2986 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 2987 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2988 } 2989 2990 return ret; 2991 } 2992 2993 /** 2994 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 2995 * @clk: clock signal source 2996 * @num: numerator of the duty cycle ratio to be applied 2997 * @den: denominator of the duty cycle ratio to be applied 2998 * 2999 * Apply the duty cycle ratio if the ratio is valid and the clock can 3000 * perform this operation 3001 * 3002 * Returns (0) on success, a negative errno otherwise. 
3003 */ 3004 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 3005 { 3006 int ret; 3007 struct clk_duty duty; 3008 3009 if (!clk) 3010 return 0; 3011 3012 /* sanity check the ratio */ 3013 if (den == 0 || num > den) 3014 return -EINVAL; 3015 3016 duty.num = num; 3017 duty.den = den; 3018 3019 clk_prepare_lock(); 3020 3021 if (clk->exclusive_count) 3022 clk_core_rate_unprotect(clk->core); 3023 3024 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 3025 3026 if (clk->exclusive_count) 3027 clk_core_rate_protect(clk->core); 3028 3029 clk_prepare_unlock(); 3030 3031 return ret; 3032 } 3033 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 3034 3035 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 3036 unsigned int scale) 3037 { 3038 struct clk_duty *duty = &core->duty; 3039 int ret; 3040 3041 clk_prepare_lock(); 3042 3043 ret = clk_core_update_duty_cycle_nolock(core); 3044 if (!ret) 3045 ret = mult_frac(scale, duty->num, duty->den); 3046 3047 clk_prepare_unlock(); 3048 3049 return ret; 3050 } 3051 3052 /** 3053 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 3054 * @clk: clock signal source 3055 * @scale: scaling factor to be applied to represent the ratio as an integer 3056 * 3057 * Returns the duty cycle ratio of a clock node multiplied by the provided 3058 * scaling factor, or negative errno on error. 3059 */ 3060 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 3061 { 3062 if (!clk) 3063 return 0; 3064 3065 return clk_core_get_scaled_duty_cycle(clk->core, scale); 3066 } 3067 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 3068 3069 /** 3070 * clk_is_match - check if two clk's point to the same hardware clock 3071 * @p: clk compared against q 3072 * @q: clk compared against p 3073 * 3074 * Returns true if the two struct clk pointers both point to the same hardware 3075 * clock node. Put differently, returns true if struct clk *p and struct clk *q 3076 * share the same struct clk_core object. 3077 * 3078 * Returns false otherwise. Note that two NULL clks are treated as matching. 3079 */ 3080 bool clk_is_match(const struct clk *p, const struct clk *q) 3081 { 3082 /* trivial case: identical struct clk's or both NULL */ 3083 if (p == q) 3084 return true; 3085 3086 /* true if clk->core pointers match. Avoid dereferencing garbage */ 3087 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 3088 if (p->core == q->core) 3089 return true; 3090 3091 return false; 3092 } 3093 EXPORT_SYMBOL_GPL(clk_is_match); 3094 3095 /*** debugfs support ***/ 3096 3097 #ifdef CONFIG_DEBUG_FS 3098 #include <linux/debugfs.h> 3099 3100 static struct dentry *rootdir; 3101 static int inited = 0; 3102 static DEFINE_MUTEX(clk_debug_lock); 3103 static HLIST_HEAD(clk_debug_list); 3104 3105 static struct hlist_head *orphan_list[] = { 3106 &clk_orphan_list, 3107 NULL, 3108 }; 3109 3110 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 3111 int level) 3112 { 3113 int phase; 3114 3115 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", 3116 level * 3 + 1, "", 3117 30 - level * 3, c->name, 3118 c->enable_count, c->prepare_count, c->protect_count, 3119 clk_core_get_rate_recalc(c), 3120 clk_core_get_accuracy_recalc(c)); 3121 3122 phase = clk_core_get_phase(c); 3123 if (phase >= 0) 3124 seq_printf(s, "%5d", phase); 3125 else 3126 seq_puts(s, "-----"); 3127 3128 seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000)); 3129 3130 if (c->ops->is_enabled) 3131 seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 
'Y' : 'N'); 3132 else if (!c->ops->enable) 3133 seq_printf(s, " %9c\n", 'Y'); 3134 else 3135 seq_printf(s, " %9c\n", '?'); 3136 } 3137 3138 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 3139 int level) 3140 { 3141 struct clk_core *child; 3142 3143 clk_pm_runtime_get(c); 3144 clk_summary_show_one(s, c, level); 3145 clk_pm_runtime_put(c); 3146 3147 hlist_for_each_entry(child, &c->children, child_node) 3148 clk_summary_show_subtree(s, child, level + 1); 3149 } 3150 3151 static int clk_summary_show(struct seq_file *s, void *data) 3152 { 3153 struct clk_core *c; 3154 struct hlist_head **lists = (struct hlist_head **)s->private; 3155 3156 seq_puts(s, " enable prepare protect duty hardware\n"); 3157 seq_puts(s, " clock count count count rate accuracy phase cycle enable\n"); 3158 seq_puts(s, "-------------------------------------------------------------------------------------------------------\n"); 3159 3160 clk_prepare_lock(); 3161 3162 for (; *lists; lists++) 3163 hlist_for_each_entry(c, *lists, child_node) 3164 clk_summary_show_subtree(s, c, 0); 3165 3166 clk_prepare_unlock(); 3167 3168 return 0; 3169 } 3170 DEFINE_SHOW_ATTRIBUTE(clk_summary); 3171 3172 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 3173 { 3174 int phase; 3175 unsigned long min_rate, max_rate; 3176 3177 clk_core_get_boundaries(c, &min_rate, &max_rate); 3178 3179 /* This should be JSON format, i.e. elements separated with a comma */ 3180 seq_printf(s, "\"%s\": { ", c->name); 3181 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 3182 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 3183 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 3184 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); 3185 seq_printf(s, "\"min_rate\": %lu,", min_rate); 3186 seq_printf(s, "\"max_rate\": %lu,", max_rate); 3187 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); 3188 phase = clk_core_get_phase(c); 3189 if (phase >= 0) 3190 seq_printf(s, "\"phase\": %d,", phase); 3191 seq_printf(s, "\"duty_cycle\": %u", 3192 clk_core_get_scaled_duty_cycle(c, 100000)); 3193 } 3194 3195 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 3196 { 3197 struct clk_core *child; 3198 3199 clk_dump_one(s, c, level); 3200 3201 hlist_for_each_entry(child, &c->children, child_node) { 3202 seq_putc(s, ','); 3203 clk_dump_subtree(s, child, level + 1); 3204 } 3205 3206 seq_putc(s, '}'); 3207 } 3208 3209 static int clk_dump_show(struct seq_file *s, void *data) 3210 { 3211 struct clk_core *c; 3212 bool first_node = true; 3213 struct hlist_head **lists = (struct hlist_head **)s->private; 3214 3215 seq_putc(s, '{'); 3216 clk_prepare_lock(); 3217 3218 for (; *lists; lists++) { 3219 hlist_for_each_entry(c, *lists, child_node) { 3220 if (!first_node) 3221 seq_putc(s, ','); 3222 first_node = false; 3223 clk_dump_subtree(s, c, 0); 3224 } 3225 } 3226 3227 clk_prepare_unlock(); 3228 3229 seq_puts(s, "}\n"); 3230 return 0; 3231 } 3232 DEFINE_SHOW_ATTRIBUTE(clk_dump); 3233 3234 #undef CLOCK_ALLOW_WRITE_DEBUGFS 3235 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3236 /* 3237 * This can be dangerous, therefore don't provide any real compile time 3238 * configuration option for this feature. 3239 * People who want to use this will need to modify the source code directly. 
3240 */ 3241 static int clk_rate_set(void *data, u64 val) 3242 { 3243 struct clk_core *core = data; 3244 int ret; 3245 3246 clk_prepare_lock(); 3247 ret = clk_core_set_rate_nolock(core, val); 3248 clk_prepare_unlock(); 3249 3250 return ret; 3251 } 3252 3253 #define clk_rate_mode 0644 3254 3255 static int clk_prepare_enable_set(void *data, u64 val) 3256 { 3257 struct clk_core *core = data; 3258 int ret = 0; 3259 3260 if (val) 3261 ret = clk_prepare_enable(core->hw->clk); 3262 else 3263 clk_disable_unprepare(core->hw->clk); 3264 3265 return ret; 3266 } 3267 3268 static int clk_prepare_enable_get(void *data, u64 *val) 3269 { 3270 struct clk_core *core = data; 3271 3272 *val = core->enable_count && core->prepare_count; 3273 return 0; 3274 } 3275 3276 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get, 3277 clk_prepare_enable_set, "%llu\n"); 3278 3279 #else 3280 #define clk_rate_set NULL 3281 #define clk_rate_mode 0444 3282 #endif 3283 3284 static int clk_rate_get(void *data, u64 *val) 3285 { 3286 struct clk_core *core = data; 3287 3288 clk_prepare_lock(); 3289 *val = clk_core_get_rate_recalc(core); 3290 clk_prepare_unlock(); 3291 3292 return 0; 3293 } 3294 3295 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n"); 3296 3297 static const struct { 3298 unsigned long flag; 3299 const char *name; 3300 } clk_flags[] = { 3301 #define ENTRY(f) { f, #f } 3302 ENTRY(CLK_SET_RATE_GATE), 3303 ENTRY(CLK_SET_PARENT_GATE), 3304 ENTRY(CLK_SET_RATE_PARENT), 3305 ENTRY(CLK_IGNORE_UNUSED), 3306 ENTRY(CLK_GET_RATE_NOCACHE), 3307 ENTRY(CLK_SET_RATE_NO_REPARENT), 3308 ENTRY(CLK_GET_ACCURACY_NOCACHE), 3309 ENTRY(CLK_RECALC_NEW_RATES), 3310 ENTRY(CLK_SET_RATE_UNGATE), 3311 ENTRY(CLK_IS_CRITICAL), 3312 ENTRY(CLK_OPS_PARENT_ENABLE), 3313 ENTRY(CLK_DUTY_CYCLE_PARENT), 3314 #undef ENTRY 3315 }; 3316 3317 static int clk_flags_show(struct seq_file *s, void *data) 3318 { 3319 struct clk_core *core = s->private; 3320 unsigned long flags = core->flags; 3321 unsigned int i; 3322 3323 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 3324 if (flags & clk_flags[i].flag) { 3325 seq_printf(s, "%s\n", clk_flags[i].name); 3326 flags &= ~clk_flags[i].flag; 3327 } 3328 } 3329 if (flags) { 3330 /* Unknown flags */ 3331 seq_printf(s, "0x%lx\n", flags); 3332 } 3333 3334 return 0; 3335 } 3336 DEFINE_SHOW_ATTRIBUTE(clk_flags); 3337 3338 static void possible_parent_show(struct seq_file *s, struct clk_core *core, 3339 unsigned int i, char terminator) 3340 { 3341 struct clk_core *parent; 3342 3343 /* 3344 * Go through the following options to fetch a parent's name. 3345 * 3346 * 1. Fetch the registered parent clock and use its name 3347 * 2. Use the global (fallback) name if specified 3348 * 3. Use the local fw_name if provided 3349 * 4. Fetch parent clock's clock-output-name if DT index was set 3350 * 3351 * This may still fail in some cases, such as when the parent is 3352 * specified directly via a struct clk_hw pointer, but it isn't 3353 * registered (yet). 
3354 */ 3355 parent = clk_core_get_parent_by_index(core, i); 3356 if (parent) 3357 seq_puts(s, parent->name); 3358 else if (core->parents[i].name) 3359 seq_puts(s, core->parents[i].name); 3360 else if (core->parents[i].fw_name) 3361 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); 3362 else if (core->parents[i].index >= 0) 3363 seq_puts(s, 3364 of_clk_get_parent_name(core->of_node, 3365 core->parents[i].index)); 3366 else 3367 seq_puts(s, "(missing)"); 3368 3369 seq_putc(s, terminator); 3370 } 3371 3372 static int possible_parents_show(struct seq_file *s, void *data) 3373 { 3374 struct clk_core *core = s->private; 3375 int i; 3376 3377 for (i = 0; i < core->num_parents - 1; i++) 3378 possible_parent_show(s, core, i, ' '); 3379 3380 possible_parent_show(s, core, i, '\n'); 3381 3382 return 0; 3383 } 3384 DEFINE_SHOW_ATTRIBUTE(possible_parents); 3385 3386 static int current_parent_show(struct seq_file *s, void *data) 3387 { 3388 struct clk_core *core = s->private; 3389 3390 if (core->parent) 3391 seq_printf(s, "%s\n", core->parent->name); 3392 3393 return 0; 3394 } 3395 DEFINE_SHOW_ATTRIBUTE(current_parent); 3396 3397 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3398 static ssize_t current_parent_write(struct file *file, const char __user *ubuf, 3399 size_t count, loff_t *ppos) 3400 { 3401 struct seq_file *s = file->private_data; 3402 struct clk_core *core = s->private; 3403 struct clk_core *parent; 3404 u8 idx; 3405 int err; 3406 3407 err = kstrtou8_from_user(ubuf, count, 0, &idx); 3408 if (err < 0) 3409 return err; 3410 3411 parent = clk_core_get_parent_by_index(core, idx); 3412 if (!parent) 3413 return -ENOENT; 3414 3415 clk_prepare_lock(); 3416 err = clk_core_set_parent_nolock(core, parent); 3417 clk_prepare_unlock(); 3418 if (err) 3419 return err; 3420 3421 return count; 3422 } 3423 3424 static const struct file_operations current_parent_rw_fops = { 3425 .open = current_parent_open, 3426 .write = current_parent_write, 3427 .read = seq_read, 3428 .llseek = seq_lseek, 3429 .release = single_release, 3430 }; 3431 #endif 3432 3433 static int clk_duty_cycle_show(struct seq_file *s, void *data) 3434 { 3435 struct clk_core *core = s->private; 3436 struct clk_duty *duty = &core->duty; 3437 3438 seq_printf(s, "%u/%u\n", duty->num, duty->den); 3439 3440 return 0; 3441 } 3442 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 3443 3444 static int clk_min_rate_show(struct seq_file *s, void *data) 3445 { 3446 struct clk_core *core = s->private; 3447 unsigned long min_rate, max_rate; 3448 3449 clk_prepare_lock(); 3450 clk_core_get_boundaries(core, &min_rate, &max_rate); 3451 clk_prepare_unlock(); 3452 seq_printf(s, "%lu\n", min_rate); 3453 3454 return 0; 3455 } 3456 DEFINE_SHOW_ATTRIBUTE(clk_min_rate); 3457 3458 static int clk_max_rate_show(struct seq_file *s, void *data) 3459 { 3460 struct clk_core *core = s->private; 3461 unsigned long min_rate, max_rate; 3462 3463 clk_prepare_lock(); 3464 clk_core_get_boundaries(core, &min_rate, &max_rate); 3465 clk_prepare_unlock(); 3466 seq_printf(s, "%lu\n", max_rate); 3467 3468 return 0; 3469 } 3470 DEFINE_SHOW_ATTRIBUTE(clk_max_rate); 3471 3472 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 3473 { 3474 struct dentry *root; 3475 3476 if (!core || !pdentry) 3477 return; 3478 3479 root = debugfs_create_dir(core->name, pdentry); 3480 core->dentry = root; 3481 3482 debugfs_create_file("clk_rate", clk_rate_mode, root, core, 3483 &clk_rate_fops); 3484 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); 3485 
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); 3486 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 3487 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 3488 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 3489 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 3490 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 3491 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 3492 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 3493 debugfs_create_file("clk_duty_cycle", 0444, root, core, 3494 &clk_duty_cycle_fops); 3495 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3496 debugfs_create_file("clk_prepare_enable", 0644, root, core, 3497 &clk_prepare_enable_fops); 3498 3499 if (core->num_parents > 1) 3500 debugfs_create_file("clk_parent", 0644, root, core, 3501 ¤t_parent_rw_fops); 3502 else 3503 #endif 3504 if (core->num_parents > 0) 3505 debugfs_create_file("clk_parent", 0444, root, core, 3506 ¤t_parent_fops); 3507 3508 if (core->num_parents > 1) 3509 debugfs_create_file("clk_possible_parents", 0444, root, core, 3510 &possible_parents_fops); 3511 3512 if (core->ops->debug_init) 3513 core->ops->debug_init(core->hw, core->dentry); 3514 } 3515 3516 /** 3517 * clk_debug_register - add a clk node to the debugfs clk directory 3518 * @core: the clk being added to the debugfs clk directory 3519 * 3520 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 3521 * initialized. Otherwise it bails out early since the debugfs clk directory 3522 * will be created lazily by clk_debug_init as part of a late_initcall. 3523 */ 3524 static void clk_debug_register(struct clk_core *core) 3525 { 3526 mutex_lock(&clk_debug_lock); 3527 hlist_add_head(&core->debug_node, &clk_debug_list); 3528 if (inited) 3529 clk_debug_create_one(core, rootdir); 3530 mutex_unlock(&clk_debug_lock); 3531 } 3532 3533 /** 3534 * clk_debug_unregister - remove a clk node from the debugfs clk directory 3535 * @core: the clk being removed from the debugfs clk directory 3536 * 3537 * Dynamically removes a clk and all its child nodes from the 3538 * debugfs clk directory if clk->dentry points to debugfs created by 3539 * clk_debug_register in __clk_core_init. 3540 */ 3541 static void clk_debug_unregister(struct clk_core *core) 3542 { 3543 mutex_lock(&clk_debug_lock); 3544 hlist_del_init(&core->debug_node); 3545 debugfs_remove_recursive(core->dentry); 3546 core->dentry = NULL; 3547 mutex_unlock(&clk_debug_lock); 3548 } 3549 3550 /** 3551 * clk_debug_init - lazily populate the debugfs clk directory 3552 * 3553 * clks are often initialized very early during boot before memory can be 3554 * dynamically allocated and well before debugfs is setup. This function 3555 * populates the debugfs clk directory once at boot-time when we know that 3556 * debugfs is setup. It should only be called once at boot-time, all other clks 3557 * added dynamically will be done so with clk_debug_register. 
3558 */ 3559 static int __init clk_debug_init(void) 3560 { 3561 struct clk_core *core; 3562 3563 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3564 pr_warn("\n"); 3565 pr_warn("********************************************************************\n"); 3566 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3567 pr_warn("** **\n"); 3568 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); 3569 pr_warn("** **\n"); 3570 pr_warn("** This means that this kernel is built to expose clk operations **\n"); 3571 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); 3572 pr_warn("** to userspace, which may compromise security on your system. **\n"); 3573 pr_warn("** **\n"); 3574 pr_warn("** If you see this message and you are not debugging the **\n"); 3575 pr_warn("** kernel, report this immediately to your vendor! **\n"); 3576 pr_warn("** **\n"); 3577 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3578 pr_warn("********************************************************************\n"); 3579 #endif 3580 3581 rootdir = debugfs_create_dir("clk", NULL); 3582 3583 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 3584 &clk_summary_fops); 3585 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 3586 &clk_dump_fops); 3587 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 3588 &clk_summary_fops); 3589 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 3590 &clk_dump_fops); 3591 3592 mutex_lock(&clk_debug_lock); 3593 hlist_for_each_entry(core, &clk_debug_list, debug_node) 3594 clk_debug_create_one(core, rootdir); 3595 3596 inited = 1; 3597 mutex_unlock(&clk_debug_lock); 3598 3599 return 0; 3600 } 3601 late_initcall(clk_debug_init); 3602 #else 3603 static inline void clk_debug_register(struct clk_core *core) { } 3604 static inline void clk_debug_unregister(struct clk_core *core) 3605 { 3606 } 3607 #endif 3608 3609 static void clk_core_reparent_orphans_nolock(void) 3610 { 3611 struct clk_core *orphan; 3612 struct hlist_node *tmp2; 3613 3614 /* 3615 * walk the list of orphan clocks and reparent any that newly finds a 3616 * parent. 3617 */ 3618 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3619 struct clk_core *parent = __clk_init_parent(orphan); 3620 3621 /* 3622 * We need to use __clk_set_parent_before() and _after() to 3623 * properly migrate any prepare/enable count of the orphan 3624 * clock. This is important for CLK_IS_CRITICAL clocks, which 3625 * are enabled during init but might not have a parent yet. 3626 */ 3627 if (parent) { 3628 /* update the clk tree topology */ 3629 __clk_set_parent_before(orphan, parent); 3630 __clk_set_parent_after(orphan, parent, NULL); 3631 __clk_recalc_accuracies(orphan); 3632 __clk_recalc_rates(orphan, true, 0); 3633 3634 /* 3635 * __clk_init_parent() will set the initial req_rate to 3636 * 0 if the clock doesn't have clk_ops::recalc_rate and 3637 * is an orphan when it's registered. 3638 * 3639 * 'req_rate' is used by clk_set_rate_range() and 3640 * clk_put() to trigger a clk_set_rate() call whenever 3641 * the boundaries are modified. Let's make sure 3642 * 'req_rate' is set to something non-zero so that 3643 * clk_set_rate_range() doesn't drop the frequency. 
3644 */ 3645 orphan->req_rate = orphan->rate; 3646 } 3647 } 3648 } 3649 3650 /** 3651 * __clk_core_init - initialize the data structures in a struct clk_core 3652 * @core: clk_core being initialized 3653 * 3654 * Initializes the lists in struct clk_core, queries the hardware for the 3655 * parent and rate and sets them both. 3656 */ 3657 static int __clk_core_init(struct clk_core *core) 3658 { 3659 int ret; 3660 struct clk_core *parent; 3661 unsigned long rate; 3662 int phase; 3663 3664 clk_prepare_lock(); 3665 3666 /* 3667 * Set hw->core after grabbing the prepare_lock to synchronize with 3668 * callers of clk_core_fill_parent_index() where we treat hw->core 3669 * being NULL as the clk not being registered yet. This is crucial so 3670 * that clks aren't parented until their parent is fully registered. 3671 */ 3672 core->hw->core = core; 3673 3674 ret = clk_pm_runtime_get(core); 3675 if (ret) 3676 goto unlock; 3677 3678 /* check to see if a clock with this name is already registered */ 3679 if (clk_core_lookup(core->name)) { 3680 pr_debug("%s: clk %s already initialized\n", 3681 __func__, core->name); 3682 ret = -EEXIST; 3683 goto out; 3684 } 3685 3686 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 3687 if (core->ops->set_rate && 3688 !((core->ops->round_rate || core->ops->determine_rate) && 3689 core->ops->recalc_rate)) { 3690 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 3691 __func__, core->name); 3692 ret = -EINVAL; 3693 goto out; 3694 } 3695 3696 if (core->ops->set_parent && !core->ops->get_parent) { 3697 pr_err("%s: %s must implement .get_parent & .set_parent\n", 3698 __func__, core->name); 3699 ret = -EINVAL; 3700 goto out; 3701 } 3702 3703 if (core->num_parents > 1 && !core->ops->get_parent) { 3704 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 3705 __func__, core->name); 3706 ret = -EINVAL; 3707 goto out; 3708 } 3709 3710 if (core->ops->set_rate_and_parent && 3711 !(core->ops->set_parent && core->ops->set_rate)) { 3712 pr_err("%s: %s must implement .set_parent & .set_rate\n", 3713 __func__, core->name); 3714 ret = -EINVAL; 3715 goto out; 3716 } 3717 3718 /* 3719 * optional platform-specific magic 3720 * 3721 * The .init callback is not used by any of the basic clock types, but 3722 * exists for weird hardware that must perform initialization magic for 3723 * CCF to get an accurate view of clock for any other callbacks. It may 3724 * also be used needs to perform dynamic allocations. Such allocation 3725 * must be freed in the terminate() callback. 3726 * This callback shall not be used to initialize the parameters state, 3727 * such as rate, parent, etc ... 3728 * 3729 * If it exist, this callback should called before any other callback of 3730 * the clock 3731 */ 3732 if (core->ops->init) { 3733 ret = core->ops->init(core->hw); 3734 if (ret) 3735 goto out; 3736 } 3737 3738 parent = core->parent = __clk_init_parent(core); 3739 3740 /* 3741 * Populate core->parent if parent has already been clk_core_init'd. If 3742 * parent has not yet been clk_core_init'd then place clk in the orphan 3743 * list. If clk doesn't have any parents then place it in the root 3744 * clk list. 3745 * 3746 * Every time a new clk is clk_init'd then we walk the list of orphan 3747 * clocks and re-parent any that are children of the clock currently 3748 * being clk_init'd. 
3749 */ 3750 if (parent) { 3751 hlist_add_head(&core->child_node, &parent->children); 3752 core->orphan = parent->orphan; 3753 } else if (!core->num_parents) { 3754 hlist_add_head(&core->child_node, &clk_root_list); 3755 core->orphan = false; 3756 } else { 3757 hlist_add_head(&core->child_node, &clk_orphan_list); 3758 core->orphan = true; 3759 } 3760 3761 /* 3762 * Set clk's accuracy. The preferred method is to use 3763 * .recalc_accuracy. For simple clocks and lazy developers the default 3764 * fallback is to use the parent's accuracy. If a clock doesn't have a 3765 * parent (or is orphaned) then accuracy is set to zero (perfect 3766 * clock). 3767 */ 3768 if (core->ops->recalc_accuracy) 3769 core->accuracy = core->ops->recalc_accuracy(core->hw, 3770 clk_core_get_accuracy_no_lock(parent)); 3771 else if (parent) 3772 core->accuracy = parent->accuracy; 3773 else 3774 core->accuracy = 0; 3775 3776 /* 3777 * Set clk's phase by clk_core_get_phase() caching the phase. 3778 * Since a phase is by definition relative to its parent, just 3779 * query the current clock phase, or just assume it's in phase. 3780 */ 3781 phase = clk_core_get_phase(core); 3782 if (phase < 0) { 3783 ret = phase; 3784 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, 3785 core->name); 3786 goto out; 3787 } 3788 3789 /* 3790 * Set clk's duty cycle. 3791 */ 3792 clk_core_update_duty_cycle_nolock(core); 3793 3794 /* 3795 * Set clk's rate. The preferred method is to use .recalc_rate. For 3796 * simple clocks and lazy developers the default fallback is to use the 3797 * parent's rate. If a clock doesn't have a parent (or is orphaned) 3798 * then rate is set to zero. 3799 */ 3800 if (core->ops->recalc_rate) 3801 rate = core->ops->recalc_rate(core->hw, 3802 clk_core_get_rate_nolock(parent)); 3803 else if (parent) 3804 rate = parent->rate; 3805 else 3806 rate = 0; 3807 core->rate = core->req_rate = rate; 3808 3809 /* 3810 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 3811 * don't get accidentally disabled when walking the orphan tree and 3812 * reparenting clocks 3813 */ 3814 if (core->flags & CLK_IS_CRITICAL) { 3815 ret = clk_core_prepare(core); 3816 if (ret) { 3817 pr_warn("%s: critical clk '%s' failed to prepare\n", 3818 __func__, core->name); 3819 goto out; 3820 } 3821 3822 ret = clk_core_enable_lock(core); 3823 if (ret) { 3824 pr_warn("%s: critical clk '%s' failed to enable\n", 3825 __func__, core->name); 3826 clk_core_unprepare(core); 3827 goto out; 3828 } 3829 } 3830 3831 clk_core_reparent_orphans_nolock(); 3832 3833 kref_init(&core->ref); 3834 out: 3835 clk_pm_runtime_put(core); 3836 unlock: 3837 if (ret) { 3838 hlist_del_init(&core->child_node); 3839 core->hw->core = NULL; 3840 } 3841 3842 clk_prepare_unlock(); 3843 3844 if (!ret) 3845 clk_debug_register(core); 3846 3847 return ret; 3848 } 3849 3850 /** 3851 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core 3852 * @core: clk to add consumer to 3853 * @clk: consumer to link to a clk 3854 */ 3855 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) 3856 { 3857 clk_prepare_lock(); 3858 hlist_add_head(&clk->clks_node, &core->clks); 3859 clk_prepare_unlock(); 3860 } 3861 3862 /** 3863 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core 3864 * @clk: consumer to unlink 3865 */ 3866 static void clk_core_unlink_consumer(struct clk *clk) 3867 { 3868 lockdep_assert_held(&prepare_lock); 3869 hlist_del(&clk->clks_node); 3870 } 3871 3872 /** 3873 * alloc_clk - 
Allocate a clk consumer, but leave it unlinked from the clk_core 3874 * @core: clk to allocate a consumer for 3875 * @dev_id: string describing device name 3876 * @con_id: connection ID string on device 3877 * 3878 * Returns: clk consumer left unlinked from the consumer list 3879 */ 3880 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, 3881 const char *con_id) 3882 { 3883 struct clk *clk; 3884 3885 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 3886 if (!clk) 3887 return ERR_PTR(-ENOMEM); 3888 3889 clk->core = core; 3890 clk->dev_id = dev_id; 3891 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 3892 clk->max_rate = ULONG_MAX; 3893 3894 return clk; 3895 } 3896 3897 /** 3898 * free_clk - Free a clk consumer 3899 * @clk: clk consumer to free 3900 * 3901 * Note, this assumes the clk has been unlinked from the clk_core consumer 3902 * list. 3903 */ 3904 static void free_clk(struct clk *clk) 3905 { 3906 kfree_const(clk->con_id); 3907 kfree(clk); 3908 } 3909 3910 /** 3911 * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given 3912 * a clk_hw 3913 * @dev: clk consumer device 3914 * @hw: clk_hw associated with the clk being consumed 3915 * @dev_id: string describing device name 3916 * @con_id: connection ID string on device 3917 * 3918 * This is the main function used to create a clk pointer for use by clk 3919 * consumers. It connects a consumer to the clk_core and clk_hw structures 3920 * used by the framework and clk provider respectively. 3921 */ 3922 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, 3923 const char *dev_id, const char *con_id) 3924 { 3925 struct clk *clk; 3926 struct clk_core *core; 3927 3928 /* This is to allow this function to be chained to others */ 3929 if (IS_ERR_OR_NULL(hw)) 3930 return ERR_CAST(hw); 3931 3932 core = hw->core; 3933 clk = alloc_clk(core, dev_id, con_id); 3934 if (IS_ERR(clk)) 3935 return clk; 3936 clk->dev = dev; 3937 3938 if (!try_module_get(core->owner)) { 3939 free_clk(clk); 3940 return ERR_PTR(-ENOENT); 3941 } 3942 3943 kref_get(&core->ref); 3944 clk_core_link_consumer(core, clk); 3945 3946 return clk; 3947 } 3948 3949 /** 3950 * clk_hw_get_clk - get a clk consumer given a clk_hw 3951 * @hw: clk_hw associated with the clk being consumed 3952 * @con_id: connection ID string on device 3953 * 3954 * Returns: new clk consumer 3955 * This is the function to be used by providers which need 3956 * to get a consumer clk and act on the clock element 3957 * Calls to this function must be balanced with calls to clk_put() 3958 */ 3959 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) 3960 { 3961 struct device *dev = hw->core->dev; 3962 const char *name = dev ?
dev_name(dev) : NULL; 3963 3964 return clk_hw_create_clk(dev, hw, name, con_id); 3965 } 3966 EXPORT_SYMBOL(clk_hw_get_clk); 3967 3968 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) 3969 { 3970 const char *dst; 3971 3972 if (!src) { 3973 if (must_exist) 3974 return -EINVAL; 3975 return 0; 3976 } 3977 3978 *dst_p = dst = kstrdup_const(src, GFP_KERNEL); 3979 if (!dst) 3980 return -ENOMEM; 3981 3982 return 0; 3983 } 3984 3985 static int clk_core_populate_parent_map(struct clk_core *core, 3986 const struct clk_init_data *init) 3987 { 3988 u8 num_parents = init->num_parents; 3989 const char * const *parent_names = init->parent_names; 3990 const struct clk_hw **parent_hws = init->parent_hws; 3991 const struct clk_parent_data *parent_data = init->parent_data; 3992 int i, ret = 0; 3993 struct clk_parent_map *parents, *parent; 3994 3995 if (!num_parents) 3996 return 0; 3997 3998 /* 3999 * Avoid unnecessary string look-ups of clk_core's possible parents by 4000 * having a cache of names/clk_hw pointers to clk_core pointers. 4001 */ 4002 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); 4003 core->parents = parents; 4004 if (!parents) 4005 return -ENOMEM; 4006 4007 /* Copy everything over because it might be __initdata */ 4008 for (i = 0, parent = parents; i < num_parents; i++, parent++) { 4009 parent->index = -1; 4010 if (parent_names) { 4011 /* throw a WARN if any entries are NULL */ 4012 WARN(!parent_names[i], 4013 "%s: invalid NULL in %s's .parent_names\n", 4014 __func__, core->name); 4015 ret = clk_cpy_name(&parent->name, parent_names[i], 4016 true); 4017 } else if (parent_data) { 4018 parent->hw = parent_data[i].hw; 4019 parent->index = parent_data[i].index; 4020 ret = clk_cpy_name(&parent->fw_name, 4021 parent_data[i].fw_name, false); 4022 if (!ret) 4023 ret = clk_cpy_name(&parent->name, 4024 parent_data[i].name, 4025 false); 4026 } else if (parent_hws) { 4027 parent->hw = parent_hws[i]; 4028 } else { 4029 ret = -EINVAL; 4030 WARN(1, "Must specify parents if num_parents > 0\n"); 4031 } 4032 4033 if (ret) { 4034 do { 4035 kfree_const(parents[i].name); 4036 kfree_const(parents[i].fw_name); 4037 } while (--i >= 0); 4038 kfree(parents); 4039 4040 return ret; 4041 } 4042 } 4043 4044 return 0; 4045 } 4046 4047 static void clk_core_free_parent_map(struct clk_core *core) 4048 { 4049 int i = core->num_parents; 4050 4051 if (!core->num_parents) 4052 return; 4053 4054 while (--i >= 0) { 4055 kfree_const(core->parents[i].name); 4056 kfree_const(core->parents[i].fw_name); 4057 } 4058 4059 kfree(core->parents); 4060 } 4061 4062 static struct clk * 4063 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) 4064 { 4065 int ret; 4066 struct clk_core *core; 4067 const struct clk_init_data *init = hw->init; 4068 4069 /* 4070 * The init data is not supposed to be used outside of registration path. 4071 * Set it to NULL so that provider drivers can't use it either and so that 4072 * we catch use of hw->init early on in the core. 
4073 */ 4074 hw->init = NULL; 4075 4076 core = kzalloc(sizeof(*core), GFP_KERNEL); 4077 if (!core) { 4078 ret = -ENOMEM; 4079 goto fail_out; 4080 } 4081 4082 core->name = kstrdup_const(init->name, GFP_KERNEL); 4083 if (!core->name) { 4084 ret = -ENOMEM; 4085 goto fail_name; 4086 } 4087 4088 if (WARN_ON(!init->ops)) { 4089 ret = -EINVAL; 4090 goto fail_ops; 4091 } 4092 core->ops = init->ops; 4093 4094 if (dev && pm_runtime_enabled(dev)) 4095 core->rpm_enabled = true; 4096 core->dev = dev; 4097 core->of_node = np; 4098 if (dev && dev->driver) 4099 core->owner = dev->driver->owner; 4100 core->hw = hw; 4101 core->flags = init->flags; 4102 core->num_parents = init->num_parents; 4103 core->min_rate = 0; 4104 core->max_rate = ULONG_MAX; 4105 4106 ret = clk_core_populate_parent_map(core, init); 4107 if (ret) 4108 goto fail_parents; 4109 4110 INIT_HLIST_HEAD(&core->clks); 4111 4112 /* 4113 * Don't call clk_hw_create_clk() here because that would pin the 4114 * provider module to itself and prevent it from ever being removed. 4115 */ 4116 hw->clk = alloc_clk(core, NULL, NULL); 4117 if (IS_ERR(hw->clk)) { 4118 ret = PTR_ERR(hw->clk); 4119 goto fail_create_clk; 4120 } 4121 4122 clk_core_link_consumer(core, hw->clk); 4123 4124 ret = __clk_core_init(core); 4125 if (!ret) 4126 return hw->clk; 4127 4128 clk_prepare_lock(); 4129 clk_core_unlink_consumer(hw->clk); 4130 clk_prepare_unlock(); 4131 4132 free_clk(hw->clk); 4133 hw->clk = NULL; 4134 4135 fail_create_clk: 4136 clk_core_free_parent_map(core); 4137 fail_parents: 4138 fail_ops: 4139 kfree_const(core->name); 4140 fail_name: 4141 kfree(core); 4142 fail_out: 4143 return ERR_PTR(ret); 4144 } 4145 4146 /** 4147 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent 4148 * @dev: Device to get device node of 4149 * 4150 * Return: device node pointer of @dev, or the device node pointer of 4151 * @dev->parent if dev doesn't have a device node, or NULL if neither 4152 * @dev or @dev->parent have a device node. 4153 */ 4154 static struct device_node *dev_or_parent_of_node(struct device *dev) 4155 { 4156 struct device_node *np; 4157 4158 if (!dev) 4159 return NULL; 4160 4161 np = dev_of_node(dev); 4162 if (!np) 4163 np = dev_of_node(dev->parent); 4164 4165 return np; 4166 } 4167 4168 /** 4169 * clk_register - allocate a new clock, register it and return an opaque cookie 4170 * @dev: device that is registering this clock 4171 * @hw: link to hardware-specific clock data 4172 * 4173 * clk_register is the *deprecated* interface for populating the clock tree with 4174 * new clock nodes. Use clk_hw_register() instead. 4175 * 4176 * Returns: a pointer to the newly allocated struct clk which 4177 * cannot be dereferenced by driver code but may be used in conjunction with the 4178 * rest of the clock API. In the event of an error clk_register will return an 4179 * error code; drivers must test for an error code after calling clk_register. 4180 */ 4181 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 4182 { 4183 return __clk_register(dev, dev_or_parent_of_node(dev), hw); 4184 } 4185 EXPORT_SYMBOL_GPL(clk_register); 4186 4187 /** 4188 * clk_hw_register - register a clk_hw and return an error code 4189 * @dev: device that is registering this clock 4190 * @hw: link to hardware-specific clock data 4191 * 4192 * clk_hw_register is the primary interface for populating the clock tree with 4193 * new clock nodes. It returns an integer equal to zero indicating success or 4194 * less than zero indicating failure. 
Drivers must test for an error code after 4195 * calling clk_hw_register(). 4196 */ 4197 int clk_hw_register(struct device *dev, struct clk_hw *hw) 4198 { 4199 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev), 4200 hw)); 4201 } 4202 EXPORT_SYMBOL_GPL(clk_hw_register); 4203 4204 /* 4205 * of_clk_hw_register - register a clk_hw and return an error code 4206 * @node: device_node of device that is registering this clock 4207 * @hw: link to hardware-specific clock data 4208 * 4209 * of_clk_hw_register() is the primary interface for populating the clock tree 4210 * with new clock nodes when a struct device is not available, but a struct 4211 * device_node is. It returns an integer equal to zero indicating success or 4212 * less than zero indicating failure. Drivers must test for an error code after 4213 * calling of_clk_hw_register(). 4214 */ 4215 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) 4216 { 4217 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); 4218 } 4219 EXPORT_SYMBOL_GPL(of_clk_hw_register); 4220 4221 /* Free memory allocated for a clock. */ 4222 static void __clk_release(struct kref *ref) 4223 { 4224 struct clk_core *core = container_of(ref, struct clk_core, ref); 4225 4226 lockdep_assert_held(&prepare_lock); 4227 4228 clk_core_free_parent_map(core); 4229 kfree_const(core->name); 4230 kfree(core); 4231 } 4232 4233 /* 4234 * Empty clk_ops for unregistered clocks. These are used temporarily 4235 * after clk_unregister() was called on a clock and until last clock 4236 * consumer calls clk_put() and the struct clk object is freed. 4237 */ 4238 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 4239 { 4240 return -ENXIO; 4241 } 4242 4243 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 4244 { 4245 WARN_ON_ONCE(1); 4246 } 4247 4248 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 4249 unsigned long parent_rate) 4250 { 4251 return -ENXIO; 4252 } 4253 4254 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 4255 { 4256 return -ENXIO; 4257 } 4258 4259 static const struct clk_ops clk_nodrv_ops = { 4260 .enable = clk_nodrv_prepare_enable, 4261 .disable = clk_nodrv_disable_unprepare, 4262 .prepare = clk_nodrv_prepare_enable, 4263 .unprepare = clk_nodrv_disable_unprepare, 4264 .set_rate = clk_nodrv_set_rate, 4265 .set_parent = clk_nodrv_set_parent, 4266 }; 4267 4268 static void clk_core_evict_parent_cache_subtree(struct clk_core *root, 4269 const struct clk_core *target) 4270 { 4271 int i; 4272 struct clk_core *child; 4273 4274 for (i = 0; i < root->num_parents; i++) 4275 if (root->parents[i].core == target) 4276 root->parents[i].core = NULL; 4277 4278 hlist_for_each_entry(child, &root->children, child_node) 4279 clk_core_evict_parent_cache_subtree(child, target); 4280 } 4281 4282 /* Remove this clk from all parent caches */ 4283 static void clk_core_evict_parent_cache(struct clk_core *core) 4284 { 4285 const struct hlist_head **lists; 4286 struct clk_core *root; 4287 4288 lockdep_assert_held(&prepare_lock); 4289 4290 for (lists = all_lists; *lists; lists++) 4291 hlist_for_each_entry(root, *lists, child_node) 4292 clk_core_evict_parent_cache_subtree(root, core); 4293 4294 } 4295 4296 /** 4297 * clk_unregister - unregister a currently registered clock 4298 * @clk: clock to unregister 4299 */ 4300 void clk_unregister(struct clk *clk) 4301 { 4302 unsigned long flags; 4303 const struct clk_ops *ops; 4304 4305 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4306 return; 4307 4308 clk_debug_unregister(clk->core); 4309 
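	/*
	 * The remainder of the teardown runs under prepare_lock: swap in the
	 * no-op clk_nodrv_ops for any consumers still holding a reference,
	 * reparent children to the orphan list, and finally drop this core's
	 * reference.
	 */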
4310 clk_prepare_lock(); 4311 4312 ops = clk->core->ops; 4313 if (ops == &clk_nodrv_ops) { 4314 pr_err("%s: unregistered clock: %s\n", __func__, 4315 clk->core->name); 4316 goto unlock; 4317 } 4318 /* 4319 * Assign empty clock ops for consumers that might still hold 4320 * a reference to this clock. 4321 */ 4322 flags = clk_enable_lock(); 4323 clk->core->ops = &clk_nodrv_ops; 4324 clk_enable_unlock(flags); 4325 4326 if (ops->terminate) 4327 ops->terminate(clk->core->hw); 4328 4329 if (!hlist_empty(&clk->core->children)) { 4330 struct clk_core *child; 4331 struct hlist_node *t; 4332 4333 /* Reparent all children to the orphan list. */ 4334 hlist_for_each_entry_safe(child, t, &clk->core->children, 4335 child_node) 4336 clk_core_set_parent_nolock(child, NULL); 4337 } 4338 4339 clk_core_evict_parent_cache(clk->core); 4340 4341 hlist_del_init(&clk->core->child_node); 4342 4343 if (clk->core->prepare_count) 4344 pr_warn("%s: unregistering prepared clock: %s\n", 4345 __func__, clk->core->name); 4346 4347 if (clk->core->protect_count) 4348 pr_warn("%s: unregistering protected clock: %s\n", 4349 __func__, clk->core->name); 4350 4351 kref_put(&clk->core->ref, __clk_release); 4352 free_clk(clk); 4353 unlock: 4354 clk_prepare_unlock(); 4355 } 4356 EXPORT_SYMBOL_GPL(clk_unregister); 4357 4358 /** 4359 * clk_hw_unregister - unregister a currently registered clk_hw 4360 * @hw: hardware-specific clock data to unregister 4361 */ 4362 void clk_hw_unregister(struct clk_hw *hw) 4363 { 4364 clk_unregister(hw->clk); 4365 } 4366 EXPORT_SYMBOL_GPL(clk_hw_unregister); 4367 4368 static void devm_clk_unregister_cb(struct device *dev, void *res) 4369 { 4370 clk_unregister(*(struct clk **)res); 4371 } 4372 4373 static void devm_clk_hw_unregister_cb(struct device *dev, void *res) 4374 { 4375 clk_hw_unregister(*(struct clk_hw **)res); 4376 } 4377 4378 /** 4379 * devm_clk_register - resource managed clk_register() 4380 * @dev: device that is registering this clock 4381 * @hw: link to hardware-specific clock data 4382 * 4383 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. 4384 * 4385 * Clocks returned from this function are automatically clk_unregister()ed on 4386 * driver detach. See clk_register() for more information. 4387 */ 4388 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 4389 { 4390 struct clk *clk; 4391 struct clk **clkp; 4392 4393 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL); 4394 if (!clkp) 4395 return ERR_PTR(-ENOMEM); 4396 4397 clk = clk_register(dev, hw); 4398 if (!IS_ERR(clk)) { 4399 *clkp = clk; 4400 devres_add(dev, clkp); 4401 } else { 4402 devres_free(clkp); 4403 } 4404 4405 return clk; 4406 } 4407 EXPORT_SYMBOL_GPL(devm_clk_register); 4408 4409 /** 4410 * devm_clk_hw_register - resource managed clk_hw_register() 4411 * @dev: device that is registering this clock 4412 * @hw: link to hardware-specific clock data 4413 * 4414 * Managed clk_hw_register(). Clocks registered by this function are 4415 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 4416 * for more information. 
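 *
 * A minimal usage sketch, assuming a probe function that registers one
 * clock (foo_probe, foo_clk and foo_gate_ops are illustrative names, not
 * part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		static const struct clk_init_data init = {
 *			.name = "foo_clk",
 *			.ops = &foo_gate_ops,
 *		};
 *		struct clk_hw *hw;
 *
 *		hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
 *		if (!hw)
 *			return -ENOMEM;
 *
 *		hw->init = &init;
 *		return devm_clk_hw_register(&pdev->dev, hw);
 *	}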
4417 */ 4418 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 4419 { 4420 struct clk_hw **hwp; 4421 int ret; 4422 4423 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL); 4424 if (!hwp) 4425 return -ENOMEM; 4426 4427 ret = clk_hw_register(dev, hw); 4428 if (!ret) { 4429 *hwp = hw; 4430 devres_add(dev, hwp); 4431 } else { 4432 devres_free(hwp); 4433 } 4434 4435 return ret; 4436 } 4437 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 4438 4439 static void devm_clk_release(struct device *dev, void *res) 4440 { 4441 clk_put(*(struct clk **)res); 4442 } 4443 4444 /** 4445 * devm_clk_hw_get_clk - resource managed clk_hw_get_clk() 4446 * @dev: device that is registering this clock 4447 * @hw: clk_hw associated with the clk being consumed 4448 * @con_id: connection ID string on device 4449 * 4450 * Managed clk_hw_get_clk(). Clocks got with this function are 4451 * automatically clk_put() on driver detach. See clk_put() 4452 * for more information. 4453 */ 4454 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, 4455 const char *con_id) 4456 { 4457 struct clk *clk; 4458 struct clk **clkp; 4459 4460 /* This should not happen because it would mean we have drivers 4461 * passing around clk_hw pointers instead of having the caller use 4462 * proper clk_get() style APIs 4463 */ 4464 WARN_ON_ONCE(dev != hw->core->dev); 4465 4466 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 4467 if (!clkp) 4468 return ERR_PTR(-ENOMEM); 4469 4470 clk = clk_hw_get_clk(hw, con_id); 4471 if (!IS_ERR(clk)) { 4472 *clkp = clk; 4473 devres_add(dev, clkp); 4474 } else { 4475 devres_free(clkp); 4476 } 4477 4478 return clk; 4479 } 4480 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk); 4481 4482 /* 4483 * clkdev helpers 4484 */ 4485 4486 void __clk_put(struct clk *clk) 4487 { 4488 struct module *owner; 4489 4490 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4491 return; 4492 4493 clk_prepare_lock(); 4494 4495 /* 4496 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 4497 * given user should be balanced with calls to clk_rate_exclusive_put() 4498 * and by that same consumer 4499 */ 4500 if (WARN_ON(clk->exclusive_count)) { 4501 /* We voiced our concern, let's sanitize the situation */ 4502 clk->core->protect_count -= (clk->exclusive_count - 1); 4503 clk_core_rate_unprotect(clk->core); 4504 clk->exclusive_count = 0; 4505 } 4506 4507 hlist_del(&clk->clks_node); 4508 4509 /* If we had any boundaries on that clock, let's drop them. */ 4510 if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) 4511 clk_set_rate_range_nolock(clk, 0, ULONG_MAX); 4512 4513 owner = clk->core->owner; 4514 kref_put(&clk->core->ref, __clk_release); 4515 4516 clk_prepare_unlock(); 4517 4518 module_put(owner); 4519 4520 free_clk(clk); 4521 } 4522 4523 /*** clk rate change notifiers ***/ 4524 4525 /** 4526 * clk_notifier_register - add a clk rate change notifier 4527 * @clk: struct clk * to watch 4528 * @nb: struct notifier_block * with callback info 4529 * 4530 * Request notification when clk's rate changes. This uses an SRCU 4531 * notifier because we want it to block and notifier unregistrations are 4532 * uncommon. The callbacks associated with the notifier must not 4533 * re-enter into the clk framework by calling any top-level clk APIs; 4534 * this will cause a nested prepare_lock mutex. 
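 *
 * A minimal callback sketch, assuming the notifier_block is embedded in a
 * driver-private structure (struct foo and foo_clk_cb are illustrative
 * names, not part of this file):
 *
 *	static int foo_clk_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *		struct foo *foo = container_of(nb, struct foo, clk_nb);
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > foo->max_rate)
 *			return NOTIFIER_BAD;
 *
 *		return NOTIFIER_OK;
 *	}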
4535 * 4536 * In all notification cases (pre, post and abort rate change) the original 4537 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 4538 * and the new frequency is passed via struct clk_notifier_data.new_rate. 4539 * 4540 * clk_notifier_register() must be called from non-atomic context. 4541 * Returns -EINVAL if called with null arguments, -ENOMEM upon 4542 * allocation failure; otherwise, passes along the return value of 4543 * srcu_notifier_chain_register(). 4544 */ 4545 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 4546 { 4547 struct clk_notifier *cn; 4548 int ret = -ENOMEM; 4549 4550 if (!clk || !nb) 4551 return -EINVAL; 4552 4553 clk_prepare_lock(); 4554 4555 /* search the list of notifiers for this clk */ 4556 list_for_each_entry(cn, &clk_notifier_list, node) 4557 if (cn->clk == clk) 4558 goto found; 4559 4560 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 4561 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 4562 if (!cn) 4563 goto out; 4564 4565 cn->clk = clk; 4566 srcu_init_notifier_head(&cn->notifier_head); 4567 4568 list_add(&cn->node, &clk_notifier_list); 4569 4570 found: 4571 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 4572 4573 clk->core->notifier_count++; 4574 4575 out: 4576 clk_prepare_unlock(); 4577 4578 return ret; 4579 } 4580 EXPORT_SYMBOL_GPL(clk_notifier_register); 4581 4582 /** 4583 * clk_notifier_unregister - remove a clk rate change notifier 4584 * @clk: struct clk * 4585 * @nb: struct notifier_block * with callback info 4586 * 4587 * Request no further notification for changes to 'clk' and frees memory 4588 * allocated in clk_notifier_register. 4589 * 4590 * Returns -EINVAL if called with null arguments; otherwise, passes 4591 * along the return value of srcu_notifier_chain_unregister(). 
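 *
 * Calls must balance an earlier clk_notifier_register() with the same @clk
 * and @nb; drivers that prefer automatic cleanup can use
 * devm_clk_notifier_register() below instead.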
4592 */ 4593 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 4594 { 4595 struct clk_notifier *cn; 4596 int ret = -ENOENT; 4597 4598 if (!clk || !nb) 4599 return -EINVAL; 4600 4601 clk_prepare_lock(); 4602 4603 list_for_each_entry(cn, &clk_notifier_list, node) { 4604 if (cn->clk == clk) { 4605 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 4606 4607 clk->core->notifier_count--; 4608 4609 /* XXX the notifier code should handle this better */ 4610 if (!cn->notifier_head.head) { 4611 srcu_cleanup_notifier_head(&cn->notifier_head); 4612 list_del(&cn->node); 4613 kfree(cn); 4614 } 4615 break; 4616 } 4617 } 4618 4619 clk_prepare_unlock(); 4620 4621 return ret; 4622 } 4623 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 4624 4625 struct clk_notifier_devres { 4626 struct clk *clk; 4627 struct notifier_block *nb; 4628 }; 4629 4630 static void devm_clk_notifier_release(struct device *dev, void *res) 4631 { 4632 struct clk_notifier_devres *devres = res; 4633 4634 clk_notifier_unregister(devres->clk, devres->nb); 4635 } 4636 4637 int devm_clk_notifier_register(struct device *dev, struct clk *clk, 4638 struct notifier_block *nb) 4639 { 4640 struct clk_notifier_devres *devres; 4641 int ret; 4642 4643 devres = devres_alloc(devm_clk_notifier_release, 4644 sizeof(*devres), GFP_KERNEL); 4645 4646 if (!devres) 4647 return -ENOMEM; 4648 4649 ret = clk_notifier_register(clk, nb); 4650 if (!ret) { 4651 devres->clk = clk; 4652 devres->nb = nb; 4653 } else { 4654 devres_free(devres); 4655 } 4656 4657 return ret; 4658 } 4659 EXPORT_SYMBOL_GPL(devm_clk_notifier_register); 4660 4661 #ifdef CONFIG_OF 4662 static void clk_core_reparent_orphans(void) 4663 { 4664 clk_prepare_lock(); 4665 clk_core_reparent_orphans_nolock(); 4666 clk_prepare_unlock(); 4667 } 4668 4669 /** 4670 * struct of_clk_provider - Clock provider registration structure 4671 * @link: Entry in global list of clock providers 4672 * @node: Pointer to device tree node of clock provider 4673 * @get: Get clock callback. Returns NULL or a struct clk for the 4674 * given clock specifier 4675 * @get_hw: Get clk_hw callback. 
Returns NULL, ERR_PTR or a 4676 * struct clk_hw for the given clock specifier 4677 * @data: context pointer to be passed into @get callback 4678 */ 4679 struct of_clk_provider { 4680 struct list_head link; 4681 4682 struct device_node *node; 4683 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 4684 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 4685 void *data; 4686 }; 4687 4688 extern struct of_device_id __clk_of_table; 4689 static const struct of_device_id __clk_of_table_sentinel 4690 __used __section("__clk_of_table_end"); 4691 4692 static LIST_HEAD(of_clk_providers); 4693 static DEFINE_MUTEX(of_clk_mutex); 4694 4695 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 4696 void *data) 4697 { 4698 return data; 4699 } 4700 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 4701 4702 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 4703 { 4704 return data; 4705 } 4706 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 4707 4708 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 4709 { 4710 struct clk_onecell_data *clk_data = data; 4711 unsigned int idx = clkspec->args[0]; 4712 4713 if (idx >= clk_data->clk_num) { 4714 pr_err("%s: invalid clock index %u\n", __func__, idx); 4715 return ERR_PTR(-EINVAL); 4716 } 4717 4718 return clk_data->clks[idx]; 4719 } 4720 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 4721 4722 struct clk_hw * 4723 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 4724 { 4725 struct clk_hw_onecell_data *hw_data = data; 4726 unsigned int idx = clkspec->args[0]; 4727 4728 if (idx >= hw_data->num) { 4729 pr_err("%s: invalid index %u\n", __func__, idx); 4730 return ERR_PTR(-EINVAL); 4731 } 4732 4733 return hw_data->hws[idx]; 4734 } 4735 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 4736 4737 /** 4738 * of_clk_add_provider() - Register a clock provider for a node 4739 * @np: Device node pointer associated with clock provider 4740 * @clk_src_get: callback for decoding clock 4741 * @data: context pointer for @clk_src_get callback. 4742 * 4743 * This function is *deprecated*. Use of_clk_add_hw_provider() instead. 4744 */ 4745 int of_clk_add_provider(struct device_node *np, 4746 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 4747 void *data), 4748 void *data) 4749 { 4750 struct of_clk_provider *cp; 4751 int ret; 4752 4753 if (!np) 4754 return 0; 4755 4756 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4757 if (!cp) 4758 return -ENOMEM; 4759 4760 cp->node = of_node_get(np); 4761 cp->data = data; 4762 cp->get = clk_src_get; 4763 4764 mutex_lock(&of_clk_mutex); 4765 list_add(&cp->link, &of_clk_providers); 4766 mutex_unlock(&of_clk_mutex); 4767 pr_debug("Added clock from %pOF\n", np); 4768 4769 clk_core_reparent_orphans(); 4770 4771 ret = of_clk_set_defaults(np, true); 4772 if (ret < 0) 4773 of_clk_del_provider(np); 4774 4775 fwnode_dev_initialized(&np->fwnode, true); 4776 4777 return ret; 4778 } 4779 EXPORT_SYMBOL_GPL(of_clk_add_provider); 4780 4781 /** 4782 * of_clk_add_hw_provider() - Register a clock provider for a node 4783 * @np: Device node pointer associated with clock provider 4784 * @get: callback for decoding clk_hw 4785 * @data: context pointer for @get callback. 
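 *
 * A minimal sketch using the stock one-cell callback (np and hw_data are
 * assumed to have been set up by the caller; hw_data is a populated
 * struct clk_hw_onecell_data):
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 *	if (ret)
 *		return ret;
 *
 * The provider must later be removed with of_clk_del_provider(); the devm
 * variant below handles that automatically.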
4786 */ 4787 int of_clk_add_hw_provider(struct device_node *np, 4788 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4789 void *data), 4790 void *data) 4791 { 4792 struct of_clk_provider *cp; 4793 int ret; 4794 4795 if (!np) 4796 return 0; 4797 4798 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4799 if (!cp) 4800 return -ENOMEM; 4801 4802 cp->node = of_node_get(np); 4803 cp->data = data; 4804 cp->get_hw = get; 4805 4806 mutex_lock(&of_clk_mutex); 4807 list_add(&cp->link, &of_clk_providers); 4808 mutex_unlock(&of_clk_mutex); 4809 pr_debug("Added clk_hw provider from %pOF\n", np); 4810 4811 clk_core_reparent_orphans(); 4812 4813 ret = of_clk_set_defaults(np, true); 4814 if (ret < 0) 4815 of_clk_del_provider(np); 4816 4817 fwnode_dev_initialized(&np->fwnode, true); 4818 4819 return ret; 4820 } 4821 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 4822 4823 static void devm_of_clk_release_provider(struct device *dev, void *res) 4824 { 4825 of_clk_del_provider(*(struct device_node **)res); 4826 } 4827 4828 /* 4829 * We allow a child device to use its parent device as the clock provider node 4830 * for cases like MFD sub-devices where the child device driver wants to use 4831 * devm_*() APIs but not list the device in DT as a sub-node. 4832 */ 4833 static struct device_node *get_clk_provider_node(struct device *dev) 4834 { 4835 struct device_node *np, *parent_np; 4836 4837 np = dev->of_node; 4838 parent_np = dev->parent ? dev->parent->of_node : NULL; 4839 4840 if (!of_find_property(np, "#clock-cells", NULL)) 4841 if (of_find_property(parent_np, "#clock-cells", NULL)) 4842 np = parent_np; 4843 4844 return np; 4845 } 4846 4847 /** 4848 * devm_of_clk_add_hw_provider() - Managed clk provider node registration 4849 * @dev: Device acting as the clock provider (used for DT node and lifetime) 4850 * @get: callback for decoding clk_hw 4851 * @data: context pointer for @get callback 4852 * 4853 * Registers a clock provider for the given device's node. If the device has no DT 4854 * node, or if the device node lacks clock provider information (#clock-cells), 4855 * then the parent device's node is scanned for this information. If the parent node 4856 * has #clock-cells, it is used for the registration. The provider is 4857 * automatically released on driver detach. 4858 * 4859 * Return: 0 on success or an errno on failure.
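 *
 * A minimal sketch for a single-clock provider (pdev and foo are
 * illustrative; &foo->hw is the clk_hw handed back to consumers):
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
 *					  &foo->hw);
 *
 * No explicit of_clk_del_provider() call is needed; the provider is removed
 * when the device is unbound.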
4860 */ 4861 int devm_of_clk_add_hw_provider(struct device *dev, 4862 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4863 void *data), 4864 void *data) 4865 { 4866 struct device_node **ptr, *np; 4867 int ret; 4868 4869 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 4870 GFP_KERNEL); 4871 if (!ptr) 4872 return -ENOMEM; 4873 4874 np = get_clk_provider_node(dev); 4875 ret = of_clk_add_hw_provider(np, get, data); 4876 if (!ret) { 4877 *ptr = np; 4878 devres_add(dev, ptr); 4879 } else { 4880 devres_free(ptr); 4881 } 4882 4883 return ret; 4884 } 4885 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 4886 4887 /** 4888 * of_clk_del_provider() - Remove a previously registered clock provider 4889 * @np: Device node pointer associated with clock provider 4890 */ 4891 void of_clk_del_provider(struct device_node *np) 4892 { 4893 struct of_clk_provider *cp; 4894 4895 if (!np) 4896 return; 4897 4898 mutex_lock(&of_clk_mutex); 4899 list_for_each_entry(cp, &of_clk_providers, link) { 4900 if (cp->node == np) { 4901 list_del(&cp->link); 4902 fwnode_dev_initialized(&np->fwnode, false); 4903 of_node_put(cp->node); 4904 kfree(cp); 4905 break; 4906 } 4907 } 4908 mutex_unlock(&of_clk_mutex); 4909 } 4910 EXPORT_SYMBOL_GPL(of_clk_del_provider); 4911 4912 /** 4913 * of_parse_clkspec() - Parse a DT clock specifier for a given device node 4914 * @np: device node to parse clock specifier from 4915 * @index: index of phandle to parse clock out of. If index < 0, @name is used 4916 * @name: clock name to find and parse. If name is NULL, the index is used 4917 * @out_args: Result of parsing the clock specifier 4918 * 4919 * Parses a device node's "clocks" and "clock-names" properties to find the 4920 * phandle and cells for the index or name that is desired. The resulting clock 4921 * specifier is placed into @out_args, or an errno is returned when there's a 4922 * parsing error. The @index argument is ignored if @name is non-NULL. 4923 * 4924 * Example: 4925 * 4926 * phandle1: clock-controller@1 { 4927 * #clock-cells = <2>; 4928 * } 4929 * 4930 * phandle2: clock-controller@2 { 4931 * #clock-cells = <1>; 4932 * } 4933 * 4934 * clock-consumer@3 { 4935 * clocks = <&phandle1 1 2 &phandle2 3>; 4936 * clock-names = "name1", "name2"; 4937 * } 4938 * 4939 * To get a device_node for `clock-controller@2' node you may call this 4940 * function a few different ways: 4941 * 4942 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); 4943 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); 4944 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); 4945 * 4946 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT 4947 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in 4948 * the "clock-names" property of @np. 4949 */ 4950 static int of_parse_clkspec(const struct device_node *np, int index, 4951 const char *name, struct of_phandle_args *out_args) 4952 { 4953 int ret = -ENOENT; 4954 4955 /* Walk up the tree of devices looking for a clock property that matches */ 4956 while (np) { 4957 /* 4958 * For named clocks, first look up the name in the 4959 * "clock-names" property. If it cannot be found, then index 4960 * will be an error code and of_parse_phandle_with_args() will 4961 * return -EINVAL. 
4962 */ 4963 if (name) 4964 index = of_property_match_string(np, "clock-names", name); 4965 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 4966 index, out_args); 4967 if (!ret) 4968 break; 4969 if (name && index >= 0) 4970 break; 4971 4972 /* 4973 * No matching clock found on this node. If the parent node 4974 * has a "clock-ranges" property, then we can try one of its 4975 * clocks. 4976 */ 4977 np = np->parent; 4978 if (np && !of_get_property(np, "clock-ranges", NULL)) 4979 break; 4980 index = 0; 4981 } 4982 4983 return ret; 4984 } 4985 4986 static struct clk_hw * 4987 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 4988 struct of_phandle_args *clkspec) 4989 { 4990 struct clk *clk; 4991 4992 if (provider->get_hw) 4993 return provider->get_hw(clkspec, provider->data); 4994 4995 clk = provider->get(clkspec, provider->data); 4996 if (IS_ERR(clk)) 4997 return ERR_CAST(clk); 4998 return __clk_get_hw(clk); 4999 } 5000 5001 static struct clk_hw * 5002 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 5003 { 5004 struct of_clk_provider *provider; 5005 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); 5006 5007 if (!clkspec) 5008 return ERR_PTR(-EINVAL); 5009 5010 mutex_lock(&of_clk_mutex); 5011 list_for_each_entry(provider, &of_clk_providers, link) { 5012 if (provider->node == clkspec->np) { 5013 hw = __of_clk_get_hw_from_provider(provider, clkspec); 5014 if (!IS_ERR(hw)) 5015 break; 5016 } 5017 } 5018 mutex_unlock(&of_clk_mutex); 5019 5020 return hw; 5021 } 5022 5023 /** 5024 * of_clk_get_from_provider() - Lookup a clock from a clock provider 5025 * @clkspec: pointer to a clock specifier data structure 5026 * 5027 * This function looks up a struct clk from the registered list of clock 5028 * providers, an input is a clock specifier data structure as returned 5029 * from the of_parse_phandle_with_args() function call. 5030 */ 5031 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 5032 { 5033 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); 5034 5035 return clk_hw_create_clk(NULL, hw, NULL, __func__); 5036 } 5037 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 5038 5039 struct clk_hw *of_clk_get_hw(struct device_node *np, int index, 5040 const char *con_id) 5041 { 5042 int ret; 5043 struct clk_hw *hw; 5044 struct of_phandle_args clkspec; 5045 5046 ret = of_parse_clkspec(np, index, con_id, &clkspec); 5047 if (ret) 5048 return ERR_PTR(ret); 5049 5050 hw = of_clk_get_hw_from_clkspec(&clkspec); 5051 of_node_put(clkspec.np); 5052 5053 return hw; 5054 } 5055 5056 static struct clk *__of_clk_get(struct device_node *np, 5057 int index, const char *dev_id, 5058 const char *con_id) 5059 { 5060 struct clk_hw *hw = of_clk_get_hw(np, index, con_id); 5061 5062 return clk_hw_create_clk(NULL, hw, dev_id, con_id); 5063 } 5064 5065 struct clk *of_clk_get(struct device_node *np, int index) 5066 { 5067 return __of_clk_get(np, index, np->full_name, NULL); 5068 } 5069 EXPORT_SYMBOL(of_clk_get); 5070 5071 /** 5072 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node 5073 * @np: pointer to clock consumer node 5074 * @name: name of consumer's clock input, or NULL for the first clock reference 5075 * 5076 * This function parses the clocks and clock-names properties, 5077 * and uses them to look up the struct clk from the registered list of clock 5078 * providers. 
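 *
 * For example, given the (illustrative) consumer node:
 *
 *	uart@1000 {
 *		clocks = <&osc>, <&pll 1>;
 *		clock-names = "baud", "bus";
 *	};
 *
 * of_clk_get_by_name(np, "bus") resolves the second entry, while a NULL
 * @name returns the first clock listed in "clocks".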
5079 */ 5080 struct clk *of_clk_get_by_name(struct device_node *np, const char *name) 5081 { 5082 if (!np) 5083 return ERR_PTR(-ENOENT); 5084 5085 return __of_clk_get(np, 0, np->full_name, name); 5086 } 5087 EXPORT_SYMBOL(of_clk_get_by_name); 5088 5089 /** 5090 * of_clk_get_parent_count() - Count the number of clocks a device node has 5091 * @np: device node to count 5092 * 5093 * Returns: The number of clocks that are possible parents of this node 5094 */ 5095 unsigned int of_clk_get_parent_count(const struct device_node *np) 5096 { 5097 int count; 5098 5099 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 5100 if (count < 0) 5101 return 0; 5102 5103 return count; 5104 } 5105 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 5106 5107 const char *of_clk_get_parent_name(const struct device_node *np, int index) 5108 { 5109 struct of_phandle_args clkspec; 5110 struct property *prop; 5111 const char *clk_name; 5112 const __be32 *vp; 5113 u32 pv; 5114 int rc; 5115 int count; 5116 struct clk *clk; 5117 5118 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 5119 &clkspec); 5120 if (rc) 5121 return NULL; 5122 5123 index = clkspec.args_count ? clkspec.args[0] : 0; 5124 count = 0; 5125 5126 /* if there is an indices property, use it to transfer the index 5127 * specified into an array offset for the clock-output-names property. 5128 */ 5129 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 5130 if (index == pv) { 5131 index = count; 5132 break; 5133 } 5134 count++; 5135 } 5136 /* We went off the end of 'clock-indices' without finding it */ 5137 if (prop && !vp) 5138 return NULL; 5139 5140 if (of_property_read_string_index(clkspec.np, "clock-output-names", 5141 index, 5142 &clk_name) < 0) { 5143 /* 5144 * Best effort to get the name if the clock has been 5145 * registered with the framework. If the clock isn't 5146 * registered, we return the node name as the name of 5147 * the clock as long as #clock-cells = 0. 5148 */ 5149 clk = of_clk_get_from_provider(&clkspec); 5150 if (IS_ERR(clk)) { 5151 if (clkspec.args_count == 0) 5152 clk_name = clkspec.np->name; 5153 else 5154 clk_name = NULL; 5155 } else { 5156 clk_name = __clk_get_name(clk); 5157 clk_put(clk); 5158 } 5159 } 5160 5161 5162 of_node_put(clkspec.np); 5163 return clk_name; 5164 } 5165 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 5166 5167 /** 5168 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 5169 * number of parents 5170 * @np: Device node pointer associated with clock provider 5171 * @parents: pointer to char array that hold the parents' names 5172 * @size: size of the @parents array 5173 * 5174 * Return: number of parents for the clock node. 5175 */ 5176 int of_clk_parent_fill(struct device_node *np, const char **parents, 5177 unsigned int size) 5178 { 5179 unsigned int i = 0; 5180 5181 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 5182 i++; 5183 5184 return i; 5185 } 5186 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 5187 5188 struct clock_provider { 5189 void (*clk_init_cb)(struct device_node *); 5190 struct device_node *np; 5191 struct list_head node; 5192 }; 5193 5194 /* 5195 * This function looks for a parent clock. If there is one, then it 5196 * checks that the provider for this parent clock was initialized, in 5197 * this case the parent clock will be ready. 
5198 */ 5199 static int parent_ready(struct device_node *np) 5200 { 5201 int i = 0; 5202 5203 while (true) { 5204 struct clk *clk = of_clk_get(np, i); 5205 5206 /* this parent is ready, we can check the next one */ 5207 if (!IS_ERR(clk)) { 5208 clk_put(clk); 5209 i++; 5210 continue; 5211 } 5212 5213 /* at least one parent is not ready, we exit now */ 5214 if (PTR_ERR(clk) == -EPROBE_DEFER) 5215 return 0; 5216 5217 /* 5218 * Here we assume that the device tree is 5219 * written correctly. Any other error means that there is 5220 * no more parent. As we didn't exit above, all the 5221 * previous parents are ready. If there are no clock 5222 * parents at all, there is nothing to wait for, so we 5223 * consider their absence as being ready too. 5224 */ 5225 return 1; 5226 } 5227 } 5228 5229 /** 5230 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 5231 * @np: Device node pointer associated with clock provider 5232 * @index: clock index 5233 * @flags: pointer to top-level framework flags 5234 * 5235 * Detects if the clock-critical property exists and, if so, sets the 5236 * corresponding CLK_IS_CRITICAL flag. 5237 * 5238 * Do not use this function. It exists only for legacy Device Tree 5239 * bindings, such as the outdated one-clock-per-node style. 5240 * Those bindings typically put all clock data into .dts and the Linux 5241 * driver has no clock data, thus making it impossible to set this flag 5242 * correctly from the driver. Only those drivers may call 5243 * of_clk_detect_critical() from their setup functions. 5244 * 5245 * Return: error code or zero on success 5246 */ 5247 int of_clk_detect_critical(struct device_node *np, int index, 5248 unsigned long *flags) 5249 { 5250 struct property *prop; 5251 const __be32 *cur; 5252 uint32_t idx; 5253 5254 if (!np || !flags) 5255 return -EINVAL; 5256 5257 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 5258 if (index == idx) 5259 *flags |= CLK_IS_CRITICAL; 5260 5261 return 0; 5262 } 5263 5264 /** 5265 * of_clk_init() - Scan and init clock providers from the DT 5266 * @matches: array of compatible values and init functions for providers. 5267 * 5268 * This function scans the device tree for matching clock providers 5269 * and calls their initialization functions. It tries to honour 5270 * dependencies by initializing a provider only once its parent clocks are ready.
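 *
 * Early providers normally register their setup callback with
 * CLK_OF_DECLARE() so that a NULL @matches picks them up from the built-in
 * table. A minimal sketch (the compatible string and function name are
 * illustrative):
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "acme,foo-clk", foo_clk_setup);
 *
 * Platform code then calls of_clk_init(NULL) once the device tree is
 * available.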
5271 */ 5272 void __init of_clk_init(const struct of_device_id *matches) 5273 { 5274 const struct of_device_id *match; 5275 struct device_node *np; 5276 struct clock_provider *clk_provider, *next; 5277 bool is_init_done; 5278 bool force = false; 5279 LIST_HEAD(clk_provider_list); 5280 5281 if (!matches) 5282 matches = &__clk_of_table; 5283 5284 /* First prepare the list of the clocks providers */ 5285 for_each_matching_node_and_match(np, matches, &match) { 5286 struct clock_provider *parent; 5287 5288 if (!of_device_is_available(np)) 5289 continue; 5290 5291 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 5292 if (!parent) { 5293 list_for_each_entry_safe(clk_provider, next, 5294 &clk_provider_list, node) { 5295 list_del(&clk_provider->node); 5296 of_node_put(clk_provider->np); 5297 kfree(clk_provider); 5298 } 5299 of_node_put(np); 5300 return; 5301 } 5302 5303 parent->clk_init_cb = match->data; 5304 parent->np = of_node_get(np); 5305 list_add_tail(&parent->node, &clk_provider_list); 5306 } 5307 5308 while (!list_empty(&clk_provider_list)) { 5309 is_init_done = false; 5310 list_for_each_entry_safe(clk_provider, next, 5311 &clk_provider_list, node) { 5312 if (force || parent_ready(clk_provider->np)) { 5313 5314 /* Don't populate platform devices */ 5315 of_node_set_flag(clk_provider->np, 5316 OF_POPULATED); 5317 5318 clk_provider->clk_init_cb(clk_provider->np); 5319 of_clk_set_defaults(clk_provider->np, true); 5320 5321 list_del(&clk_provider->node); 5322 of_node_put(clk_provider->np); 5323 kfree(clk_provider); 5324 is_init_done = true; 5325 } 5326 } 5327 5328 /* 5329 * We didn't manage to initialize any of the 5330 * remaining providers during the last loop, so now we 5331 * initialize all the remaining ones unconditionally 5332 * in case the clock parent was not mandatory 5333 */ 5334 if (!is_init_done) 5335 force = true; 5336 } 5337 } 5338 #endif 5339