xref: /linux/drivers/clk/clk.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is set up.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif
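
/*
 * With CONFIG_COMMON_CLK_DEBUG enabled, the code above produces a debugfs
 * tree in which each clk is a directory nested under its parent (root clks
 * sit directly under "clk", unparented clks under "clk/orphans").  A sketch
 * of the resulting layout, with hypothetical clk names -- the per-clk file
 * names come from clk_debug_create_one():
 *
 *   /sys/kernel/debug/clk/
 *     orphans/
 *     pll1/
 *       clk_rate
 *       clk_flags
 *       clk_prepare_count
 *       clk_enable_count
 *       clk_notifier_count
 *       uart0/        <- a child of pll1, with the same five files
 */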

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate; fall back to
	 * the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, or a negative
 * error code otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
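
/*
 * Example (illustrative sketch, not part of this file): a consumer driver
 * honoring the prepare/enable split documented above.  clk_prepare() does
 * the slow, sleepable work and must precede clk_enable(), which may be
 * called from atomic context; tear-down runs in the reverse order.  The
 * foo_* names are hypothetical.
 */
static int foo_power_on(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* may sleep: PLL lock, I2C, etc. */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	ret = clk_enable(clk);		/* fast, non-sleeping ungate */
	if (ret) {
		clk_unprepare(clk);
		clk_put(clk);
	}

	return ret;
}

static void foo_power_off(struct clk *clk)
{
	clk_disable(clk);		/* fast part first... */
	clk_unprepare(clk);		/* ...then the sleepable part */
}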

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.  If
 * clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return -EINVAL;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the request is delegated to the parent (with
 * CLK_SET_RATE_PARENT) or the clk's cached rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
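
/*
 * Example (illustrative sketch, not part of this file): a consumer asking
 * for the closest achievable rate before committing to it, per the
 * clk_set_rate() documentation above.  The foo_* name and the 48 MHz
 * target are hypothetical.
 */
static int foo_set_48mhz(struct clk *clk)
{
	long rounded;

	rounded = clk_round_rate(clk, 48000000);
	if (rounded <= 0)
		return -EINVAL;

	/* rounded is what the hardware can actually deliver; commit to it */
	return clk_set_rate(clk, rounded);
}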

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(1, "%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
								GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, or a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects (covers both NOTIFY_STOP and NOTIFY_BAD) */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
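
/*
 * Example (illustrative sketch, not part of this file): switching a mux clk
 * to a new parent.  If the mux has CLK_SET_PARENT_GATE set it must be gated
 * first or the -EBUSY check above will trigger.  Error handling on the
 * re-ungate path is elided for brevity; the names are hypothetical.
 */
static int foo_reparent(struct clk *mux, struct clk *new_parent)
{
	int ret;

	/* gate the clk so the CLK_SET_PARENT_GATE check passes */
	clk_disable(mux);
	clk_unprepare(mux);

	ret = clk_set_parent(mux, new_parent);

	/* ungate again regardless of whether the switch succeeded */
	clk_prepare(mux);
	clk_enable(mux);

	return ret;
}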

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:	device initializing this clk, placeholder for now
 * @clk:	clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		clk->parents = kmalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk. None of the fields
 * of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns the
 * struct clk cookie on success, or an ERR_PTR-encoded error code otherwise.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return clk;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
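
/*
 * Example (illustrative sketch, not part of this file): how a platform
 * driver might register a fixed divide-by-two clk through clk_register().
 * All foo_* names are hypothetical; real code would typically use a basic
 * type helper such as clk_register_divider() instead of raw ops.
 */
static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	return parent_rate / 2;
}

static const struct clk_ops foo_clk_ops = {
	.recalc_rate = foo_recalc_rate,
};

static const char *foo_parents[] = { "foo_parent" };

static struct clk_init_data foo_init = {
	.name = "foo_div2",
	.ops = &foo_clk_ops,
	.parent_names = foo_parents,
	.num_parents = 1,
};

static struct clk_hw foo_hw = {
	.init = &foo_init,
};

static struct clk *foo_register(struct device *dev)
{
	/*
	 * clk_register() copies the name strings itself; the ops and the
	 * clk_hw must stay around for the lifetime of the clk.
	 */
	return clk_register(dev, &foo_hw);
}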

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would attempt to take the prepare_lock mutex recursively and
 * deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
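
/*
 * Example (illustrative sketch, not part of this file): a rate-change
 * notifier callback of the kind registered above.  Per the comment on
 * clk_notifier_register() it must not call back into the clk API.  The
 * foo_* names and the 100 MHz limit are hypothetical.
 */
static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* veto rates the device cannot follow */
		if (cnd->new_rate > 100000000)
			return NOTIFY_BAD;
		break;
	case POST_RATE_CHANGE:
		/* reprogram device timings for cnd->new_rate here */
		break;
	case ABORT_RATE_CHANGE:
		/* undo anything done at PRE_RATE_CHANGE time */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notify,
};

/* registered with: clk_notifier_register(clk, &foo_clk_nb); */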

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees the
 * memory allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);