/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}
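/*
 * For reference, the subtree created above mirrors the clk topology under
 * debugfs.  Assuming debugfs is mounted at /sys/kernel/debug, a chip with
 * a hypothetical "osc" root clock feeding a "uart_clk" gate would yield
 * (names are illustrative, not clocks registered by this file):
 *
 *	/sys/kernel/debug/clk/
 *	    orphans/
 *	    osc/
 *	        clk_rate
 *	        clk_flags
 *	        clk_prepare_count
 *	        clk_enable_count
 *	        clk_notifier_count
 *	        uart_clk/
 *	            ...
 */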
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
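/*
 * A clock provider can opt a clock out of the sweep above by setting
 * CLK_IGNORE_UNUSED in its init flags.  A minimal sketch, assuming a
 * hypothetical always-on debug clock and provider-supplied ops:
 *
 *	static struct clk_init_data dbg_init = {
 *		.name = "dbg_clk",		// hypothetical name
 *		.ops = &my_gate_ops,		// hypothetical ops
 *		.flags = CLK_IGNORE_UNUSED,	// skip clk_disable_unused
 *	};
 *
 * Such a clock is left enabled after the late_initcall even though its
 * enable_count is zero.
 */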
/***        helper functions        ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate; fall back
	 * to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  For this reason, clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);
int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  For this reason, clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
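/*
 * Consumers pair the two halves of the API as shown in this minimal
 * sketch.  "my_clk" is a hypothetical handle obtained from clk_get:
 *
 *	int ret;
 *	struct clk *my_clk = clk_get(dev, "uart");
 *
 *	ret = clk_prepare(my_clk);	// may sleep; not from atomic context
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);	// must not sleep; atomic-safe
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *
 *	// ... use the device ...
 *
 *	clk_disable(my_clk);		// reverse order on teardown
 *	clk_unprepare(my_clk);
 */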
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  For
 * this reason, clk_unprepare and clk_disable are not mutually exclusive.  In
 * fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  For this reason, clk_enable
 * and clk_prepare are not mutually exclusive.  In fact clk_prepare must be
 * called before clk_enable.  Returns 0 on success, -EERROR otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return -EINVAL;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the .round_rate
 * operation then clk's current rate is returned (or the parent's rounded
 * rate, if the CLK_SET_RATE_PARENT flag is set).
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
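/*
 * A consumer can probe what a rate request would yield before committing
 * to it.  A minimal sketch, assuming a hypothetical "mmc_clk" handle and
 * a 50 MHz target:
 *
 *	long rounded = clk_round_rate(mmc_clk, 50000000);
 *
 *	if (rounded > 0 && rounded <= 50000000)
 *		ret = clk_set_rate(mmc_clk, rounded);
 *
 * Because clk_set_rate performs the same rounding internally, this is
 * purely advisory; it lets the caller reject an unacceptable rate first.
 */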
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}
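/*
 * For context, a provider's .recalc_rate callback derives a clock's rate
 * from the cached parent rate passed in above.  A minimal sketch for a
 * hypothetical fixed divide-by-2 clock (not one of the basic clock types):
 *
 *	static unsigned long div2_recalc_rate(struct clk_hw *hw,
 *					      unsigned long parent_rate)
 *	{
 *		return parent_rate / 2;
 *	}
 *
 * Hardware with a programmable divider would read the divider register
 * here instead of using a constant.
 */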
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw,
					new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate,
				&best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}
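/*
 * A worked example of the walk above, assuming a hypothetical chain
 * pll -> div -> uart where "div" and "uart" both set CLK_SET_RATE_PARENT:
 *
 * clk_calc_new_rates(uart, rate) asks uart's .round_rate, which may
 * request a new *parent_rate; if that differs from div's current rate,
 * the walk recurses into clk_calc_new_rates(div, ...) and, by the same
 * rule, possibly up to "pll".  The first clock whose .round_rate is
 * satisfied with its parent's existing rate becomes "top", and
 * clk_calc_subtree then fills in new_rate for everything below it.
 */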
static struct clk *clk_propagate_rate_change(struct clk *clk,
		unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				clk->parent->rate);
	else
		clk->rate = clk->parent->rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with
 * __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kmalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	/* find index of new parent clock using cached parent ptrs */
	for (i = 0; i < clk->num_parents; i++)
		if (clk->parents[i] == parent)
			break;

	/*
	 * find index of new parent clock using string name comparison
	 * also try to cache the parent to avoid future calls to __clk_lookup
	 */
	if (i == clk->num_parents)
		for (i = 0; i < clk->num_parents; i++)
			if (!strcmp(clk->parent_names[i], parent->name)) {
				clk->parents[i] = __clk_lookup(parent->name);
				break;
			}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, debugfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects; covers both NOTIFY_STOP and NOTIFY_BAD */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
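/*
 * Typical consumer-side use, assuming hypothetical handles obtained from
 * clk_get for a mux and one of its candidate inputs:
 *
 *	struct clk *mux = clk_get(dev, "sys_mux");
 *	struct clk *pll = clk_get(dev, "pll");
 *
 *	ret = clk_set_parent(mux, pll);
 *
 * With CLK_SET_PARENT_GATE set on "sys_mux", the call fails with -EBUSY
 * while the mux is prepared, so switch parents before clk_prepare (or
 * after clk_unprepare).
 */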
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		clk->parents = kmalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list,
			child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns the struct clk on success, or an ERR_PTR on failure.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an ERR_PTR-encoded error code; drivers must test for this with
 * IS_ERR after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
				GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return clk;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
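/*
 * A provider-side sketch of clk_register, using hypothetical names and a
 * hypothetical my_gate_ops; a real driver would typically embed the
 * clk_hw in its own state structure:
 *
 *	static struct clk_hw my_hw;
 *
 *	static const char *my_parents[] = { "osc" };
 *
 *	static struct clk_init_data my_init = {
 *		.name = "uart_clk",
 *		.ops = &my_gate_ops,
 *		.parent_names = my_parents,
 *		.num_parents = 1,
 *	};
 *
 *	my_hw.init = &my_init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */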
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would try to take the prepare_lock mutex recursively and
 * deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
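/*
 * A minimal consumer-side sketch of the notifier API above.  The callback
 * name and the veto policy are hypothetical:
 *
 *	static int my_rate_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;	// veto rates above 100 MHz
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb };
 *
 *	clk_notifier_register(my_clk, &my_nb);
 *
 * Remember: the callback runs with prepare_lock held, so it must not call
 * clk_prepare, clk_set_rate or any other top-level clk API.
 */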