/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this requirement is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is set up.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time; all other clks added dynamically will be
 * registered with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif /* CONFIG_COMMON_CLK_DEBUG */

#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
#else
static inline int clk_disable_unused(void) { return 0; }
#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = -EINVAL;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = -ENODEV;

out:
	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate; fall back to
	 * the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
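
/*
 * Illustrative consumer fragment showing the two-stage gating sequence the
 * kerneldoc above describes: clk_prepare in sleepable context, clk_enable in
 * atomic context, and the inverse order to shut the clock back off.  The
 * device and the "periph" clock name are hypothetical.
 *
 *	struct clk *clk = clk_get(dev, "periph");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare(clk);			// may sleep
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);			// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *
 *	// ... use the hardware ...
 *
 *	clk_disable(clk);			// atomic-safe
 *	clk_unprepare(clk);			// may sleep
 */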

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.  If
 * clk is NULL then returns -EINVAL.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return -EINVAL;

	if (!clk->ops->round_rate)
		return clk->rate;

	/*
	 * seed with the cached parent rate so that .round_rate never reads
	 * an uninitialized stack variable
	 */
	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->flags & CLK_SET_RATE_PARENT)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else
		return clk->ops->round_rate(clk->hw, rate, NULL);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the clk's cached rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* a root or orphan clk has no parent; avoid a NULL dereference */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
		clk->new_rate = clk->rate;
		return NULL;
	}

	if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->new_rate = clk->parent->new_rate;

		goto out;
	}

	if (clk->flags & CLK_SET_RATE_PARENT)
		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
	else
		new_rate = clk->ops->round_rate(clk->hw, rate, NULL);

	if (clk->parent && best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	/* a root or orphan clk has no parent; fall back to a zero rate */
	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only change the rate of clk.
 *
 * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
 * will fail; only when the clk is disabled will it be able to change
 * its rate.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
 * recursively propagate up to clk's parent; whether or not this happens
 * depends on the outcome of clk's .round_rate implementation.  If
 * *parent_rate is 0 after calling .round_rate then upstream parent
 * propagation is ignored.  If *parent_rate comes back with a new rate
 * for clk's parent then we propagate up to clk's parent and set its
 * rate.  Upward propagation will continue until either a clk does not
 * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
 * changes to clk's parent_rate.  If there is a failure during upstream
 * propagation then clk_set_rate will unwind and restore each clk's rate
 * that had been successfully changed.  Afterwards a rate change abort
 * notification will be propagated downstream, starting from the clk
 * that failed.
 *
 * At the end of all of the rate setting, clk_set_rate internally calls
 * __clk_recalc_rates and propagates the rate changes downstream,
 * starting from the highest clk whose rate was changed.  This has the
 * added benefit of propagating post-rate change notifiers.
 *
 * Note that while post-rate change and rate change abort notifications
 * are guaranteed to be sent to a clk only once per call to
 * clk_set_rate, pre-change notifications will be sent for every clk
 * whose rate is changed.  Stacking pre-change notifications is noisy
 * for the drivers subscribed to them, but this allows drivers to react
 * to intermediate clk rate changes up until the point where the final
 * rate is achieved at the end of upstream propagation.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* a NULL clk is treated as a no-op, as elsewhere in the clk API */
	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
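
/*
 * A minimal sketch of typical clk_set_rate usage from driver code: round the
 * target rate first, then set it.  The 19.2 MHz figure is hypothetical.
 *
 *	long rounded;
 *
 *	rounded = clk_round_rate(clk, 19200000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *
 * Rounding first avoids surprises: the hardware may only support a discrete
 * set of rates, and .round_rate reports the rate that would actually result.
 */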

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(1,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kmalloc(sizeof(struct clk *) * clk->num_parents,
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	/* find index of new parent clock using cached parent ptrs */
	for (i = 0; i < clk->num_parents; i++)
		if (clk->parents[i] == parent)
			break;

	/*
	 * find index of new parent clock using string name comparison
	 * also try to cache the parent to avoid future calls to __clk_lookup
	 */
	if (i == clk->num_parents)
		for (i = 0; i < clk->num_parents; i++)
			if (!strcmp(clk->parent_names[i], parent->name)) {
				clk->parents[i] = __clk_lookup(parent->name);
				break;
			}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, debugfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops || !parent)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
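
/*
 * An illustrative re-parenting sketch.  Per the CLK_SET_PARENT_GATE rule
 * described above, a gated mux must be disabled and unprepared before its
 * input can be switched.  The "uart_mux" and "pll2" names are hypothetical.
 *
 *	struct clk *mux = clk_get(dev, "uart_mux");
 *	struct clk *pll = clk_get(dev, "pll2");
 *
 *	clk_disable(mux);
 *	clk_unprepare(mux);
 *
 *	ret = clk_set_parent(mux, pll);
 *	if (!ret) {
 *		ret = clk_prepare(mux);
 *		if (!ret)
 *			ret = clk_enable(mux);
 *	}
 */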

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:	device initializing this clk, placeholder for now
 * @clk:	clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 *
 * Any struct clk passed into __clk_init must have the following members
 * populated:
 * 	.name
 * 	.ops
 * 	.hw
 * 	.parent_names
 * 	.num_parents
 * 	.flags
 *
 * Essentially, everything that would normally be passed into clk_register is
 * assumed to be initialized already in __clk_init.  The other members may be
 * populated, but are optional.
 *
 * __clk_init is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.
 */
void __clk_init(struct device *dev, struct clk *clk)
{
	int i;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name))
		goto out;

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		clk->parents = kmalloc(sizeof(struct clk *) * clk->num_parents,
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @name: clock name
 * @ops: operations this clock supports
 * @hw: link to hardware-specific clock data
 * @parent_names: array of string names for all possible parents
 * @num_parents: number of possible parents
 * @flags: framework-level hints and quirks
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.
 */
struct clk *clk_register(struct device *dev, const char *name,
		const struct clk_ops *ops, struct clk_hw *hw,
		char **parent_names, u8 num_parents, unsigned long flags)
{
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return NULL;

	clk->name = name;
	clk->ops = ops;
	clk->hw = hw;
	clk->flags = flags;
	clk->parent_names = parent_names;
	clk->num_parents = num_parents;
	hw->clk = clk;

	__clk_init(dev, clk);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_register);
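
/*
 * A minimal registration sketch for a hardware clock driver.  The ops table,
 * my_recalc_rate callback, my_hw container and the "pll1"/"osc" names are all
 * hypothetical; only .recalc_rate is filled in, so the clk falls back to
 * framework defaults for everything else.  Note that this revision of
 * clk_register returns NULL, not an ERR_PTR, on allocation failure.
 *
 *	static const struct clk_ops my_clk_ops = {
 *		.recalc_rate = my_recalc_rate,
 *	};
 *
 *	static char *my_parents[] = { "osc" };
 *
 *	clk = clk_register(dev, "pll1", &my_clk_ops, &my_hw.hw,
 *			my_parents, ARRAY_SIZE(my_parents), 0);
 *	if (!clk)
 *		return -ENOMEM;
 */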

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would recursively take the prepare_lock mutex and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL, *iter;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/*
	 * search the list of notifiers for this clk; use a separate iterator
	 * so that the cursor is never dereferenced after a completed walk,
	 * when it points at the list head rather than at a real entry
	 */
	list_for_each_entry(iter, &clk_notifier_list, node)
		if (iter->clk == clk) {
			cn = iter;
			break;
		}

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (!cn) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
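
/*
 * A sketch of a rate-change notifier callback: the consumer recomputes some
 * internal divider on POST_RATE_CHANGE.  The my_clk_notify/my_update_divider
 * names and the my_nb notifier_block are hypothetical driver-side code.
 *
 *	static int my_clk_notify(struct notifier_block *nb,
 *			unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			my_update_divider(cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */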

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL, *iter;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* use a separate iterator so cn stays NULL if clk was never found */
	list_for_each_entry(iter, &clk_notifier_list, node)
		if (iter->clk == clk) {
			cn = iter;
			break;
		}

	if (cn) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1462