xref: /linux/drivers/clk/clk.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5  *
6  * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
7  */
8 
9 #include <linux/clk/clk-conf.h>
10 #include <linux/clkdev.h>
11 #include <linux/clk.h>
12 #include <linux/clk-provider.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/hashtable.h>
16 #include <linux/init.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/of.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/sched.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/stringhash.h>
27 
28 #include "clk.h"
29 
30 static DEFINE_SPINLOCK(enable_lock);
31 static DEFINE_MUTEX(prepare_lock);
32 
33 static struct task_struct *prepare_owner;
34 static struct task_struct *enable_owner;
35 
36 static int prepare_refcnt;
37 static int enable_refcnt;
38 
39 #define CLK_HASH_BITS 9
40 static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
41 
42 static HLIST_HEAD(clk_root_list);
43 static HLIST_HEAD(clk_orphan_list);
44 static LIST_HEAD(clk_notifier_list);
45 
46 /* List of registered clks that use runtime PM */
47 static HLIST_HEAD(clk_rpm_list);
48 static DEFINE_MUTEX(clk_rpm_list_lock);
49 
50 static const struct hlist_head *all_lists[] = {
51 	&clk_root_list,
52 	&clk_orphan_list,
53 	NULL,
54 };
55 
56 /***    private data structures    ***/
57 
58 struct clk_parent_map {
59 	const struct clk_hw	*hw;
60 	struct clk_core		*core;
61 	const char		*fw_name;
62 	const char		*name;
63 	int			index;
64 };
65 
66 struct clk_core {
67 	const char		*name;
68 	const struct clk_ops	*ops;
69 	struct clk_hw		*hw;
70 	struct module		*owner;
71 	struct device		*dev;
72 	struct hlist_node	rpm_node;
73 	struct device_node	*of_node;
74 	struct clk_core		*parent;
75 	struct clk_parent_map	*parents;
76 	u8			num_parents;
77 	u8			new_parent_index;
78 	unsigned long		rate;
79 	unsigned long		req_rate;
80 	unsigned long		new_rate;
81 	struct clk_core		*new_parent;
82 	struct clk_core		*new_child;
83 	unsigned long		flags;
84 	bool			orphan;
85 	bool			rpm_enabled;
86 	unsigned int		enable_count;
87 	unsigned int		prepare_count;
88 	unsigned int		protect_count;
89 	unsigned long		min_rate;
90 	unsigned long		max_rate;
91 	unsigned long		accuracy;
92 	int			phase;
93 	struct clk_duty		duty;
94 	struct hlist_head	children;
95 	struct hlist_node	child_node;
96 	struct hlist_node	hashtable_node;
97 	struct hlist_head	clks;
98 	unsigned int		notifier_count;
99 #ifdef CONFIG_DEBUG_FS
100 	struct dentry		*dentry;
101 	struct hlist_node	debug_node;
102 #endif
103 	struct kref		ref;
104 };
105 
106 #define CREATE_TRACE_POINTS
107 #include <trace/events/clk.h>
108 
109 struct clk {
110 	struct clk_core	*core;
111 	struct device *dev;
112 	const char *dev_id;
113 	const char *con_id;
114 	unsigned long min_rate;
115 	unsigned long max_rate;
116 	unsigned int exclusive_count;
117 	struct hlist_node clks_node;
118 };
119 
120 /***           runtime pm          ***/
121 static int clk_pm_runtime_get(struct clk_core *core)
122 {
123 	if (!core->rpm_enabled)
124 		return 0;
125 
126 	return pm_runtime_resume_and_get(core->dev);
127 }
128 
129 static void clk_pm_runtime_put(struct clk_core *core)
130 {
131 	if (!core->rpm_enabled)
132 		return;
133 
134 	pm_runtime_put_sync(core->dev);
135 }
136 
137 /**
138  * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
139  *
140  * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
141  * that disabling unused clks avoids a deadlock where a device is runtime PM
142  * resuming/suspending and the runtime PM callback is trying to grab the
143  * prepare_lock for something like clk_prepare_enable() while
144  * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
145  * PM resume/suspend the device as well.
146  *
147  * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
148  * success. On failure, the lock is released before returning.
149  *
150  * Return: 0 on success, negative errno otherwise.
151  */
152 static int clk_pm_runtime_get_all(void)
153 {
154 	int ret;
155 	struct clk_core *core, *failed;
156 
157 	/*
158 	 * Grab the list lock to prevent any new clks from being registered
159 	 * or unregistered until clk_pm_runtime_put_all().
160 	 */
161 	mutex_lock(&clk_rpm_list_lock);
162 
163 	/*
164 	 * Runtime PM "get" all the devices that are needed for the clks
165 	 * currently registered. Do this without holding the prepare_lock, to
166 	 * avoid the deadlock.
167 	 */
168 	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
169 		ret = clk_pm_runtime_get(core);
170 		if (ret) {
171 			failed = core;
172 			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
173 			       dev_name(failed->dev), failed->name);
174 			goto err;
175 		}
176 	}
177 
178 	return 0;
179 
180 err:
181 	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
182 		if (core == failed)
183 			break;
184 
185 		clk_pm_runtime_put(core);
186 	}
187 	mutex_unlock(&clk_rpm_list_lock);
188 
189 	return ret;
190 }
191 
192 /**
193  * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
194  *
195  * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
196  * the 'clk_rpm_list_lock'.
197  */
198 static void clk_pm_runtime_put_all(void)
199 {
200 	struct clk_core *core;
201 
202 	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
203 		clk_pm_runtime_put(core);
204 	mutex_unlock(&clk_rpm_list_lock);
205 }
206 
207 static void clk_pm_runtime_init(struct clk_core *core)
208 {
209 	struct device *dev = core->dev;
210 
211 	if (dev && pm_runtime_enabled(dev)) {
212 		core->rpm_enabled = true;
213 
214 		mutex_lock(&clk_rpm_list_lock);
215 		hlist_add_head(&core->rpm_node, &clk_rpm_list);
216 		mutex_unlock(&clk_rpm_list_lock);
217 	}
218 }
219 
220 /***           locking             ***/
221 static void clk_prepare_lock(void)
222 {
223 	if (!mutex_trylock(&prepare_lock)) {
224 		if (prepare_owner == current) {
225 			prepare_refcnt++;
226 			return;
227 		}
228 		mutex_lock(&prepare_lock);
229 	}
230 	WARN_ON_ONCE(prepare_owner != NULL);
231 	WARN_ON_ONCE(prepare_refcnt != 0);
232 	prepare_owner = current;
233 	prepare_refcnt = 1;
234 }
235 
236 static void clk_prepare_unlock(void)
237 {
238 	WARN_ON_ONCE(prepare_owner != current);
239 	WARN_ON_ONCE(prepare_refcnt == 0);
240 
241 	if (--prepare_refcnt)
242 		return;
243 	prepare_owner = NULL;
244 	mutex_unlock(&prepare_lock);
245 }
246 
247 static unsigned long clk_enable_lock(void)
248 	__acquires(enable_lock)
249 {
250 	unsigned long flags;
251 
252 	/*
253 	 * On UP systems, spin_trylock_irqsave() always returns true, even if
254 	 * we already hold the lock. So, in that case, we rely only on
255 	 * reference counting.
256 	 */
257 	if (!IS_ENABLED(CONFIG_SMP) ||
258 	    !spin_trylock_irqsave(&enable_lock, flags)) {
259 		if (enable_owner == current) {
260 			enable_refcnt++;
261 			__acquire(enable_lock);
262 			if (!IS_ENABLED(CONFIG_SMP))
263 				local_save_flags(flags);
264 			return flags;
265 		}
266 		spin_lock_irqsave(&enable_lock, flags);
267 	}
268 	WARN_ON_ONCE(enable_owner != NULL);
269 	WARN_ON_ONCE(enable_refcnt != 0);
270 	enable_owner = current;
271 	enable_refcnt = 1;
272 	return flags;
273 }
274 
275 static void clk_enable_unlock(unsigned long flags)
276 	__releases(enable_lock)
277 {
278 	WARN_ON_ONCE(enable_owner != current);
279 	WARN_ON_ONCE(enable_refcnt == 0);
280 
281 	if (--enable_refcnt) {
282 		__release(enable_lock);
283 		return;
284 	}
285 	enable_owner = NULL;
286 	spin_unlock_irqrestore(&enable_lock, flags);
287 }
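
/*
 * Illustrative sketch (not a real call site): both locks above are reentrant
 * for the owning task, so framework code may nest acquisitions without
 * deadlocking:
 *
 *	clk_prepare_lock();
 *	clk_prepare_lock();	(same task: prepare_refcnt is bumped)
 *	clk_prepare_unlock();
 *	clk_prepare_unlock();	(last unlock releases the mutex)
 */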
288 
289 static bool clk_core_rate_is_protected(struct clk_core *core)
290 {
291 	return core->protect_count;
292 }
293 
294 static bool clk_core_is_prepared(struct clk_core *core)
295 {
296 	bool ret = false;
297 
298 	/*
299 	 * .is_prepared is optional for clocks that can prepare;
300 	 * fall back to the software usage counter if it is missing
301 	 */
302 	if (!core->ops->is_prepared)
303 		return core->prepare_count;
304 
305 	if (!clk_pm_runtime_get(core)) {
306 		ret = core->ops->is_prepared(core->hw);
307 		clk_pm_runtime_put(core);
308 	}
309 
310 	return ret;
311 }
312 
313 static bool clk_core_is_enabled(struct clk_core *core)
314 {
315 	bool ret = false;
316 
317 	/*
318 	 * .is_enabled is only mandatory for clocks that gate;
319 	 * fall back to the software usage counter if .is_enabled is missing
320 	 */
321 	if (!core->ops->is_enabled)
322 		return core->enable_count;
323 
324 	/*
325 	 * Check if clock controller's device is runtime active before
326 	 * calling .is_enabled callback. If not, assume that clock is
327 	 * disabled, because we might be called from atomic context, from
328 	 * which pm_runtime_get() is not allowed.
329 	 * This function is called mainly from clk_disable_unused_subtree,
330 	 * which ensures proper runtime pm activation of controller before
331 	 * taking enable spinlock, but the below check is needed if one tries
332 	 * to call it from other places.
333 	 */
334 	if (core->rpm_enabled) {
335 		pm_runtime_get_noresume(core->dev);
336 		if (!pm_runtime_active(core->dev)) {
337 			ret = false;
338 			goto done;
339 		}
340 	}
341 
342 	/*
343 	 * This could be called with the enable lock held, or from atomic
344 	 * context. If the parent isn't enabled already, we can't do
345 	 * anything here. We can also assume this clock isn't enabled.
346 	 */
347 	if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
348 		if (!clk_core_is_enabled(core->parent)) {
349 			ret = false;
350 			goto done;
351 		}
352 
353 	ret = core->ops->is_enabled(core->hw);
354 done:
355 	if (core->rpm_enabled)
356 		pm_runtime_put(core->dev);
357 
358 	return ret;
359 }
360 
361 /***    helper functions   ***/
362 
363 const char *__clk_get_name(const struct clk *clk)
364 {
365 	return !clk ? NULL : clk->core->name;
366 }
367 EXPORT_SYMBOL_GPL(__clk_get_name);
368 
369 const char *clk_hw_get_name(const struct clk_hw *hw)
370 {
371 	return hw->core->name;
372 }
373 EXPORT_SYMBOL_GPL(clk_hw_get_name);
374 
375 struct device *clk_hw_get_dev(const struct clk_hw *hw)
376 {
377 	return hw->core->dev;
378 }
379 EXPORT_SYMBOL_GPL(clk_hw_get_dev);
380 
381 struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
382 {
383 	return hw->core->of_node;
384 }
385 EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
386 
387 struct clk_hw *__clk_get_hw(struct clk *clk)
388 {
389 	return !clk ? NULL : clk->core->hw;
390 }
391 EXPORT_SYMBOL_GPL(__clk_get_hw);
392 
393 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
394 {
395 	return hw->core->num_parents;
396 }
397 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
398 
399 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
400 {
401 	return hw->core->parent ? hw->core->parent->hw : NULL;
402 }
403 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
404 
405 static struct clk_core *clk_core_lookup(const char *name)
406 {
407 	struct clk_core *core;
408 	u32 hash;
409 
410 	if (!name)
411 		return NULL;
412 
413 	hash = full_name_hash(NULL, name, strlen(name));
414 
415 	/* search the hashtable */
416 	hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
417 		if (!strcmp(core->name, name))
418 			return core;
419 
420 	return NULL;
421 }
422 
423 #ifdef CONFIG_OF
424 static int of_parse_clkspec(const struct device_node *np, int index,
425 			    const char *name, struct of_phandle_args *out_args);
426 static struct clk_hw *
427 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
428 #else
429 static inline int of_parse_clkspec(const struct device_node *np, int index,
430 				   const char *name,
431 				   struct of_phandle_args *out_args)
432 {
433 	return -ENOENT;
434 }
435 static inline struct clk_hw *
436 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
437 {
438 	return ERR_PTR(-ENOENT);
439 }
440 #endif
441 
442 /**
443  * clk_core_get - Find the clk_core parent of a clk
444  * @core: clk to find parent of
445  * @p_index: parent index to search for
446  *
447  * This is the preferred method for clk providers to find the parent of a
448  * clk when that parent is external to the clk controller. The parent_names
449  * array is indexed and treated as a local name matching a string in the device
450  * node's 'clock-names' property or as the 'con_id' matching the device's
451  * dev_name() in a clk_lookup. This allows clk providers to use their own
452  * namespace instead of looking for a globally unique parent string.
453  *
454  * For example the following DT snippet would allow a clock registered by the
455  * clock-controller@c001 that has a clk_init_data::parent_data array
456  * with 'xtal' in the 'name' member to find the clock provided by the
457  * clock-controller@f00abcd without needing to get the globally unique name of
458  * the xtal clk.
459  *
460  *      parent: clock-controller@f00abcd {
461  *              reg = <0xf00abcd 0xabcd>;
462  *              #clock-cells = <0>;
463  *      };
464  *
465  *      clock-controller@c001 {
466  *              reg = <0xc001 0xf00d>;
467  *              clocks = <&parent>;
468  *              clock-names = "xtal";
469  *              #clock-cells = <1>;
470  *      };
471  *
472  * Returns: -ENOENT when the provider can't be found or the clk doesn't
473  * exist in the provider or the name can't be found in the DT node or
474  * in a clkdev lookup. NULL when the provider knows about the clk but it
475  * isn't provided on this system.
476  * A valid clk_core pointer when the clk can be found in the provider.
477  */
478 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
479 {
480 	const char *name = core->parents[p_index].fw_name;
481 	int index = core->parents[p_index].index;
482 	struct clk_hw *hw = ERR_PTR(-ENOENT);
483 	struct device *dev = core->dev;
484 	const char *dev_id = dev ? dev_name(dev) : NULL;
485 	struct device_node *np = core->of_node;
486 	struct of_phandle_args clkspec;
487 
488 	if (np && (name || index >= 0) &&
489 	    !of_parse_clkspec(np, index, name, &clkspec)) {
490 		hw = of_clk_get_hw_from_clkspec(&clkspec);
491 		of_node_put(clkspec.np);
492 	} else if (name) {
493 		/*
494 		 * If the DT search above couldn't find the provider fallback to
495 		 * looking up via clkdev based clk_lookups.
496 		 */
497 		hw = clk_find_hw(dev_id, name);
498 	}
499 
500 	if (IS_ERR(hw))
501 		return ERR_CAST(hw);
502 
503 	if (!hw)
504 		return NULL;
505 
506 	return hw->core;
507 }
508 
509 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
510 {
511 	struct clk_parent_map *entry = &core->parents[index];
512 	struct clk_core *parent;
513 
514 	if (entry->hw) {
515 		parent = entry->hw->core;
516 	} else {
517 		parent = clk_core_get(core, index);
518 		if (PTR_ERR(parent) == -ENOENT && entry->name)
519 			parent = clk_core_lookup(entry->name);
520 	}
521 
522 	/*
523 	 * We have a direct reference but it isn't registered yet?
524 	 * Orphan it and let clk_reparent() update the orphan status
525 	 * when the parent is registered.
526 	 */
527 	if (!parent)
528 		parent = ERR_PTR(-EPROBE_DEFER);
529 
530 	/* Only cache it if it's not an error */
531 	if (!IS_ERR(parent))
532 		entry->core = parent;
533 }
534 
535 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
536 							 u8 index)
537 {
538 	if (!core || index >= core->num_parents || !core->parents)
539 		return NULL;
540 
541 	if (!core->parents[index].core)
542 		clk_core_fill_parent_index(core, index);
543 
544 	return core->parents[index].core;
545 }
546 
547 struct clk_hw *
548 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
549 {
550 	struct clk_core *parent;
551 
552 	parent = clk_core_get_parent_by_index(hw->core, index);
553 
554 	return !parent ? NULL : parent->hw;
555 }
556 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
557 
558 unsigned int __clk_get_enable_count(struct clk *clk)
559 {
560 	return !clk ? 0 : clk->core->enable_count;
561 }
562 
563 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
564 {
565 	if (!core)
566 		return 0;
567 
568 	if (!core->num_parents || core->parent)
569 		return core->rate;
570 
571 	/*
572 	 * Clk must have a parent because num_parents > 0 but the parent isn't
573 	 * known yet. Best to return 0 as the rate of this clk until we can
574 	 * properly recalc the rate based on the parent's rate.
575 	 */
576 	return 0;
577 }
578 
579 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
580 {
581 	return clk_core_get_rate_nolock(hw->core);
582 }
583 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
584 
585 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
586 {
587 	if (!core)
588 		return 0;
589 
590 	return core->accuracy;
591 }
592 
593 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
594 {
595 	return hw->core->flags;
596 }
597 EXPORT_SYMBOL_GPL(clk_hw_get_flags);
598 
599 bool clk_hw_is_prepared(const struct clk_hw *hw)
600 {
601 	return clk_core_is_prepared(hw->core);
602 }
603 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
604 
605 bool clk_hw_is_enabled(const struct clk_hw *hw)
606 {
607 	return clk_core_is_enabled(hw->core);
608 }
609 EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
610 
611 bool __clk_is_enabled(struct clk *clk)
612 {
613 	if (!clk)
614 		return false;
615 
616 	return clk_core_is_enabled(clk->core);
617 }
618 EXPORT_SYMBOL_GPL(__clk_is_enabled);
619 
620 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
621 			   unsigned long best, unsigned long flags)
622 {
623 	if (flags & CLK_MUX_ROUND_CLOSEST)
624 		return abs(now - rate) < abs(best - rate);
625 
626 	return now <= rate && now > best;
627 }
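
/*
 * Worked example (hypothetical rates): for a 51 MHz request with candidate
 * parent rates of 48 MHz and 52 MHz, the default policy above picks 48 MHz
 * (the highest rate that does not exceed the request), while
 * CLK_MUX_ROUND_CLOSEST picks 52 MHz (the smallest absolute error).
 */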
628 
629 static void clk_core_init_rate_req(struct clk_core * const core,
630 				   struct clk_rate_request *req,
631 				   unsigned long rate);
632 
633 static int clk_core_round_rate_nolock(struct clk_core *core,
634 				      struct clk_rate_request *req);
635 
636 static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
637 {
638 	struct clk_core *tmp;
639 	unsigned int i;
640 
641 	/* Optimize for the case where @parent is already the current parent. */
642 	if (core->parent == parent)
643 		return true;
644 
645 	for (i = 0; i < core->num_parents; i++) {
646 		tmp = clk_core_get_parent_by_index(core, i);
647 		if (!tmp)
648 			continue;
649 
650 		if (tmp == parent)
651 			return true;
652 	}
653 
654 	return false;
655 }
656 
657 static void
658 clk_core_forward_rate_req(struct clk_core *core,
659 			  const struct clk_rate_request *old_req,
660 			  struct clk_core *parent,
661 			  struct clk_rate_request *req,
662 			  unsigned long parent_rate)
663 {
664 	if (WARN_ON(!clk_core_has_parent(core, parent)))
665 		return;
666 
667 	clk_core_init_rate_req(parent, req, parent_rate);
668 
669 	if (req->min_rate < old_req->min_rate)
670 		req->min_rate = old_req->min_rate;
671 
672 	if (req->max_rate > old_req->max_rate)
673 		req->max_rate = old_req->max_rate;
674 }
675 
676 static int
677 clk_core_determine_rate_no_reparent(struct clk_hw *hw,
678 				    struct clk_rate_request *req)
679 {
680 	struct clk_core *core = hw->core;
681 	struct clk_core *parent = core->parent;
682 	unsigned long best;
683 	int ret;
684 
685 	if (core->flags & CLK_SET_RATE_PARENT) {
686 		struct clk_rate_request parent_req;
687 
688 		if (!parent) {
689 			req->rate = 0;
690 			return 0;
691 		}
692 
693 		clk_core_forward_rate_req(core, req, parent, &parent_req,
694 					  req->rate);
695 
696 		trace_clk_rate_request_start(&parent_req);
697 
698 		ret = clk_core_round_rate_nolock(parent, &parent_req);
699 		if (ret)
700 			return ret;
701 
702 		trace_clk_rate_request_done(&parent_req);
703 
704 		best = parent_req.rate;
705 	} else if (parent) {
706 		best = clk_core_get_rate_nolock(parent);
707 	} else {
708 		best = clk_core_get_rate_nolock(core);
709 	}
710 
711 	req->best_parent_rate = best;
712 	req->rate = best;
713 
714 	return 0;
715 }
716 
717 int clk_mux_determine_rate_flags(struct clk_hw *hw,
718 				 struct clk_rate_request *req,
719 				 unsigned long flags)
720 {
721 	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
722 	int i, num_parents, ret;
723 	unsigned long best = 0;
724 
725 	/* if NO_REPARENT flag set, pass through to current parent */
726 	if (core->flags & CLK_SET_RATE_NO_REPARENT)
727 		return clk_core_determine_rate_no_reparent(hw, req);
728 
729 	/* find the parent that can provide the fastest rate <= rate */
730 	num_parents = core->num_parents;
731 	for (i = 0; i < num_parents; i++) {
732 		unsigned long parent_rate;
733 
734 		parent = clk_core_get_parent_by_index(core, i);
735 		if (!parent)
736 			continue;
737 
738 		if (core->flags & CLK_SET_RATE_PARENT) {
739 			struct clk_rate_request parent_req;
740 
741 			clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
742 
743 			trace_clk_rate_request_start(&parent_req);
744 
745 			ret = clk_core_round_rate_nolock(parent, &parent_req);
746 			if (ret)
747 				continue;
748 
749 			trace_clk_rate_request_done(&parent_req);
750 
751 			parent_rate = parent_req.rate;
752 		} else {
753 			parent_rate = clk_core_get_rate_nolock(parent);
754 		}
755 
756 		if (mux_is_better_rate(req->rate, parent_rate,
757 				       best, flags)) {
758 			best_parent = parent;
759 			best = parent_rate;
760 		}
761 	}
762 
763 	if (!best_parent)
764 		return -EINVAL;
765 
766 	req->best_parent_hw = best_parent->hw;
767 	req->best_parent_rate = best;
768 	req->rate = best;
769 
770 	return 0;
771 }
772 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
773 
774 struct clk *__clk_lookup(const char *name)
775 {
776 	struct clk_core *core = clk_core_lookup(name);
777 
778 	return !core ? NULL : core->hw->clk;
779 }
780 
781 static void clk_core_get_boundaries(struct clk_core *core,
782 				    unsigned long *min_rate,
783 				    unsigned long *max_rate)
784 {
785 	struct clk *clk_user;
786 
787 	lockdep_assert_held(&prepare_lock);
788 
789 	*min_rate = core->min_rate;
790 	*max_rate = core->max_rate;
791 
792 	hlist_for_each_entry(clk_user, &core->clks, clks_node)
793 		*min_rate = max(*min_rate, clk_user->min_rate);
794 
795 	hlist_for_each_entry(clk_user, &core->clks, clks_node)
796 		*max_rate = min(*max_rate, clk_user->max_rate);
797 }
798 
799 /*
800  * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
801  * @hw: the hw clk we want to get the range from
802  * @min_rate: pointer to the variable that will hold the minimum
803  * @max_rate: pointer to the variable that will hold the maximum
804  *
805  * Fills the @min_rate and @max_rate variables with the minimum and
806  * maximum that clock can reach.
807  */
808 void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
809 			   unsigned long *max_rate)
810 {
811 	clk_core_get_boundaries(hw->core, min_rate, max_rate);
812 }
813 EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
814 
815 static bool clk_core_check_boundaries(struct clk_core *core,
816 				      unsigned long min_rate,
817 				      unsigned long max_rate)
818 {
819 	struct clk *user;
820 
821 	lockdep_assert_held(&prepare_lock);
822 
823 	if (min_rate > core->max_rate || max_rate < core->min_rate)
824 		return false;
825 
826 	hlist_for_each_entry(user, &core->clks, clks_node)
827 		if (min_rate > user->max_rate || max_rate < user->min_rate)
828 			return false;
829 
830 	return true;
831 }
832 
833 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
834 			   unsigned long max_rate)
835 {
836 	hw->core->min_rate = min_rate;
837 	hw->core->max_rate = max_rate;
838 }
839 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
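
/*
 * Illustrative provider-side sketch (hypothetical values): a provider that
 * knows its PLL is only stable between 200 MHz and 1 GHz can constrain it
 * with:
 *
 *	clk_hw_set_rate_range(hw, 200000000UL, 1000000000UL);
 */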
840 
841 /*
842  * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
843  * @hw: mux type clk to determine rate on
844  * @req: rate request, also used to return preferred parent and frequencies
845  *
846  * Helper for finding best parent to provide a given frequency. This can be used
847  * directly as a determine_rate callback (e.g. for a mux), or from a more
848  * complex clock that may combine a mux with other operations.
849  *
850  * Returns: 0 on success, -EERROR value on error
851  */
852 int __clk_mux_determine_rate(struct clk_hw *hw,
853 			     struct clk_rate_request *req)
854 {
855 	return clk_mux_determine_rate_flags(hw, req, 0);
856 }
857 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
858 
859 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
860 				     struct clk_rate_request *req)
861 {
862 	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
863 }
864 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
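
/*
 * Illustrative provider-side sketch (hypothetical driver code): a mux that
 * should round to the closest parent rate can point its .determine_rate at
 * the helper above:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.determine_rate = __clk_mux_determine_rate_closest,
 *		.get_parent = my_mux_get_parent,
 *		.set_parent = my_mux_set_parent,
 *	};
 */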
865 
866 /*
867  * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
868  * @hw: mux type clk to determine rate on
869  * @req: rate request, also used to return preferred frequency
870  *
871  * Helper for finding best parent rate to provide a given frequency.
872  * This can be used directly as a determine_rate callback (e.g. for a
873  * mux), or from a more complex clock that may combine a mux with other
874  * operations.
875  *
876  * Returns: 0 on success, -EERROR value on error
877  */
878 int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
879 				      struct clk_rate_request *req)
880 {
881 	return clk_core_determine_rate_no_reparent(hw, req);
882 }
883 EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
884 
885 /***        clk api        ***/
886 
887 static void clk_core_rate_unprotect(struct clk_core *core)
888 {
889 	lockdep_assert_held(&prepare_lock);
890 
891 	if (!core)
892 		return;
893 
894 	if (WARN(core->protect_count == 0,
895 	    "%s already unprotected\n", core->name))
896 		return;
897 
898 	if (--core->protect_count > 0)
899 		return;
900 
901 	clk_core_rate_unprotect(core->parent);
902 }
903 
904 static int clk_core_rate_nuke_protect(struct clk_core *core)
905 {
906 	int ret;
907 
908 	lockdep_assert_held(&prepare_lock);
909 
910 	if (!core)
911 		return -EINVAL;
912 
913 	if (core->protect_count == 0)
914 		return 0;
915 
916 	ret = core->protect_count;
917 	core->protect_count = 1;
918 	clk_core_rate_unprotect(core);
919 
920 	return ret;
921 }
922 
923 /**
924  * clk_rate_exclusive_put - release exclusivity over clock rate control
925  * @clk: the clk over which the exclusivity is released
926  *
927  * clk_rate_exclusive_put() completes a critical section during which a clock
928  * consumer cannot tolerate any other consumer making any operation on the
929  * clock which could result in a rate change or rate glitch. Exclusive clocks
930  * cannot have their rate changed, either directly or indirectly due to changes
931  * further up the parent chain of clocks. As a result, clocks up parent chain
932  * also get under exclusive control of the calling consumer.
933  *
934  * If exclusivity is claimed more than once on a clock, even by the same consumer,
935  * the rate effectively gets locked as exclusivity can't be preempted.
936  *
937  * Calls to clk_rate_exclusive_put() must be balanced with calls to
938  * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
939  * error status.
940  */
941 void clk_rate_exclusive_put(struct clk *clk)
942 {
943 	if (!clk)
944 		return;
945 
946 	clk_prepare_lock();
947 
948 	/*
949 	 * if there is something wrong with this consumer protect count, stop
950 	 * here before messing with the provider
951 	 */
952 	if (WARN_ON(clk->exclusive_count <= 0))
953 		goto out;
954 
955 	clk_core_rate_unprotect(clk->core);
956 	clk->exclusive_count--;
957 out:
958 	clk_prepare_unlock();
959 }
960 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
961 
962 static void clk_core_rate_protect(struct clk_core *core)
963 {
964 	lockdep_assert_held(&prepare_lock);
965 
966 	if (!core)
967 		return;
968 
969 	if (core->protect_count == 0)
970 		clk_core_rate_protect(core->parent);
971 
972 	core->protect_count++;
973 }
974 
975 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
976 {
977 	lockdep_assert_held(&prepare_lock);
978 
979 	if (!core)
980 		return;
981 
982 	if (count == 0)
983 		return;
984 
985 	clk_core_rate_protect(core);
986 	core->protect_count = count;
987 }
988 
989 /**
990  * clk_rate_exclusive_get - get exclusivity over the clk rate control
991  * @clk: the clk over which the exclusivity of rate control is requested
992  *
993  * clk_rate_exclusive_get() begins a critical section during which a clock
994  * consumer cannot tolerate any other consumer making any operation on the
995  * clock which could result in a rate change or rate glitch. Exclusive clocks
996  * cannot have their rate changed, either directly or indirectly due to changes
997  * further up the parent chain of clocks. As a result, clocks up parent chain
998  * also get under exclusive control of the calling consumer.
999  *
1000  * If exclusivity is claimed more than once on a clock, even by the same consumer,
1001  * the rate effectively gets locked as exclusivity can't be preempted.
1002  *
1003  * Calls to clk_rate_exclusive_get() should be balanced with calls to
1004  * clk_rate_exclusive_put(). Calls to this function may sleep.
1005  * Returns 0 on success, -EERROR otherwise
1006  */
1007 int clk_rate_exclusive_get(struct clk *clk)
1008 {
1009 	if (!clk)
1010 		return 0;
1011 
1012 	clk_prepare_lock();
1013 	clk_core_rate_protect(clk->core);
1014 	clk->exclusive_count++;
1015 	clk_prepare_unlock();
1016 
1017 	return 0;
1018 }
1019 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
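
/*
 * Illustrative consumer-side sketch (error handling elided): hold the rate
 * steady while programming hardware that depends on it:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	... program registers that assume the current rate ...
 *	clk_rate_exclusive_put(clk);
 */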
1020 
1021 static void devm_clk_rate_exclusive_put(void *data)
1022 {
1023 	struct clk *clk = data;
1024 
1025 	clk_rate_exclusive_put(clk);
1026 }
1027 
1028 int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
1029 {
1030 	int ret;
1031 
1032 	ret = clk_rate_exclusive_get(clk);
1033 	if (ret)
1034 		return ret;
1035 
1036 	return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
1037 }
1038 EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
1039 
1040 static void clk_core_unprepare(struct clk_core *core)
1041 {
1042 	lockdep_assert_held(&prepare_lock);
1043 
1044 	if (!core)
1045 		return;
1046 
1047 	if (WARN(core->prepare_count == 0,
1048 	    "%s already unprepared\n", core->name))
1049 		return;
1050 
1051 	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
1052 	    "Unpreparing critical %s\n", core->name))
1053 		return;
1054 
1055 	if (core->flags & CLK_SET_RATE_GATE)
1056 		clk_core_rate_unprotect(core);
1057 
1058 	if (--core->prepare_count > 0)
1059 		return;
1060 
1061 	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
1062 
1063 	trace_clk_unprepare(core);
1064 
1065 	if (core->ops->unprepare)
1066 		core->ops->unprepare(core->hw);
1067 
1068 	trace_clk_unprepare_complete(core);
1069 	clk_core_unprepare(core->parent);
1070 	clk_pm_runtime_put(core);
1071 }
1072 
1073 static void clk_core_unprepare_lock(struct clk_core *core)
1074 {
1075 	clk_prepare_lock();
1076 	clk_core_unprepare(core);
1077 	clk_prepare_unlock();
1078 }
1079 
1080 /**
1081  * clk_unprepare - undo preparation of a clock source
1082  * @clk: the clk being unprepared
1083  *
1084  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
1085  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
1086  * if the operation may sleep.  One example is a clk which is accessed over
1087  * I2C.  In the complex case a clk gate operation may require a fast and a slow
1088  * part.  It is for this reason that clk_unprepare and clk_disable are not
1089  * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
1090  */
1091 void clk_unprepare(struct clk *clk)
1092 {
1093 	if (IS_ERR_OR_NULL(clk))
1094 		return;
1095 
1096 	clk_core_unprepare_lock(clk->core);
1097 }
1098 EXPORT_SYMBOL_GPL(clk_unprepare);
1099 
1100 static int clk_core_prepare(struct clk_core *core)
1101 {
1102 	int ret = 0;
1103 
1104 	lockdep_assert_held(&prepare_lock);
1105 
1106 	if (!core)
1107 		return 0;
1108 
1109 	if (core->prepare_count == 0) {
1110 		ret = clk_pm_runtime_get(core);
1111 		if (ret)
1112 			return ret;
1113 
1114 		ret = clk_core_prepare(core->parent);
1115 		if (ret)
1116 			goto runtime_put;
1117 
1118 		trace_clk_prepare(core);
1119 
1120 		if (core->ops->prepare)
1121 			ret = core->ops->prepare(core->hw);
1122 
1123 		trace_clk_prepare_complete(core);
1124 
1125 		if (ret)
1126 			goto unprepare;
1127 	}
1128 
1129 	core->prepare_count++;
1130 
1131 	/*
1132 	 * CLK_SET_RATE_GATE is a special case of clock protection
1133 	 * Instead of a consumer claiming exclusive rate control, it is
1134 	 * actually the provider which prevents any consumer from making any
1135 	 * operation which could result in a rate change or rate glitch while
1136 	 * the clock is prepared.
1137 	 */
1138 	if (core->flags & CLK_SET_RATE_GATE)
1139 		clk_core_rate_protect(core);
1140 
1141 	return 0;
1142 unprepare:
1143 	clk_core_unprepare(core->parent);
1144 runtime_put:
1145 	clk_pm_runtime_put(core);
1146 	return ret;
1147 }
1148 
1149 static int clk_core_prepare_lock(struct clk_core *core)
1150 {
1151 	int ret;
1152 
1153 	clk_prepare_lock();
1154 	ret = clk_core_prepare(core);
1155 	clk_prepare_unlock();
1156 
1157 	return ret;
1158 }
1159 
1160 /**
1161  * clk_prepare - prepare a clock source
1162  * @clk: the clk being prepared
1163  *
1164  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
1165  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
1166  * operation may sleep.  One example is a clk which is accessed over I2C.  In
1167  * the complex case a clk ungate operation may require a fast and a slow part.
1168  * It is for this reason that clk_prepare and clk_enable are not mutually
1169  * exclusive.  In fact clk_prepare must be called before clk_enable.
1170  * Returns 0 on success, -EERROR otherwise.
1171  */
1172 int clk_prepare(struct clk *clk)
1173 {
1174 	if (!clk)
1175 		return 0;
1176 
1177 	return clk_core_prepare_lock(clk->core);
1178 }
1179 EXPORT_SYMBOL_GPL(clk_prepare);
1180 
1181 static void clk_core_disable(struct clk_core *core)
1182 {
1183 	lockdep_assert_held(&enable_lock);
1184 
1185 	if (!core)
1186 		return;
1187 
1188 	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
1189 		return;
1190 
1191 	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
1192 	    "Disabling critical %s\n", core->name))
1193 		return;
1194 
1195 	if (--core->enable_count > 0)
1196 		return;
1197 
1198 	trace_clk_disable(core);
1199 
1200 	if (core->ops->disable)
1201 		core->ops->disable(core->hw);
1202 
1203 	trace_clk_disable_complete(core);
1204 
1205 	clk_core_disable(core->parent);
1206 }
1207 
1208 static void clk_core_disable_lock(struct clk_core *core)
1209 {
1210 	unsigned long flags;
1211 
1212 	flags = clk_enable_lock();
1213 	clk_core_disable(core);
1214 	clk_enable_unlock(flags);
1215 }
1216 
1217 /**
1218  * clk_disable - gate a clock
1219  * @clk: the clk being gated
1220  *
1221  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
1222  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
1223  * clk if the operation is fast and will never sleep.  One example is a
1224  * SoC-internal clk which is controlled via simple register writes.  In the
1225  * complex case a clk gate operation may require a fast and a slow part.  It is
1226  * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
1227  * In fact clk_disable must be called before clk_unprepare.
1228  */
1229 void clk_disable(struct clk *clk)
1230 {
1231 	if (IS_ERR_OR_NULL(clk))
1232 		return;
1233 
1234 	clk_core_disable_lock(clk->core);
1235 }
1236 EXPORT_SYMBOL_GPL(clk_disable);
1237 
1238 static int clk_core_enable(struct clk_core *core)
1239 {
1240 	int ret = 0;
1241 
1242 	lockdep_assert_held(&enable_lock);
1243 
1244 	if (!core)
1245 		return 0;
1246 
1247 	if (WARN(core->prepare_count == 0,
1248 	    "Enabling unprepared %s\n", core->name))
1249 		return -ESHUTDOWN;
1250 
1251 	if (core->enable_count == 0) {
1252 		ret = clk_core_enable(core->parent);
1253 
1254 		if (ret)
1255 			return ret;
1256 
1257 		trace_clk_enable(core);
1258 
1259 		if (core->ops->enable)
1260 			ret = core->ops->enable(core->hw);
1261 
1262 		trace_clk_enable_complete(core);
1263 
1264 		if (ret) {
1265 			clk_core_disable(core->parent);
1266 			return ret;
1267 		}
1268 	}
1269 
1270 	core->enable_count++;
1271 	return 0;
1272 }
1273 
1274 static int clk_core_enable_lock(struct clk_core *core)
1275 {
1276 	unsigned long flags;
1277 	int ret;
1278 
1279 	flags = clk_enable_lock();
1280 	ret = clk_core_enable(core);
1281 	clk_enable_unlock(flags);
1282 
1283 	return ret;
1284 }
1285 
1286 /**
1287  * clk_gate_restore_context - restore context for poweroff
1288  * @hw: the clk_hw pointer of clock whose state is to be restored
1289  *
1290  * The clock gate restore context function enables or disables
1291  * the gate clocks based on the enable_count. This is done in cases
1292  * where the clock context is lost and based on the enable_count
1293  * the clock either needs to be enabled/disabled. This
1294  * helps restore the state of gate clocks.
1295  */
1296 void clk_gate_restore_context(struct clk_hw *hw)
1297 {
1298 	struct clk_core *core = hw->core;
1299 
1300 	if (core->enable_count)
1301 		core->ops->enable(hw);
1302 	else
1303 		core->ops->disable(hw);
1304 }
1305 EXPORT_SYMBOL_GPL(clk_gate_restore_context);
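
/*
 * Illustrative sketch (hypothetical driver): a simple gate whose only lost
 * context is its enable state can use the helper above directly as its
 * .restore_context callback:
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable = my_gate_enable,
 *		.disable = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */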
1306 
1307 static int clk_core_save_context(struct clk_core *core)
1308 {
1309 	struct clk_core *child;
1310 	int ret = 0;
1311 
1312 	hlist_for_each_entry(child, &core->children, child_node) {
1313 		ret = clk_core_save_context(child);
1314 		if (ret < 0)
1315 			return ret;
1316 	}
1317 
1318 	if (core->ops && core->ops->save_context)
1319 		ret = core->ops->save_context(core->hw);
1320 
1321 	return ret;
1322 }
1323 
1324 static void clk_core_restore_context(struct clk_core *core)
1325 {
1326 	struct clk_core *child;
1327 
1328 	if (core->ops && core->ops->restore_context)
1329 		core->ops->restore_context(core->hw);
1330 
1331 	hlist_for_each_entry(child, &core->children, child_node)
1332 		clk_core_restore_context(child);
1333 }
1334 
1335 /**
1336  * clk_save_context - save clock context for poweroff
1337  *
1338  * Saves the context of the clock register for powerstates in which the
1339  * contents of the registers will be lost. Occurs deep within the suspend
1340  * code.  Returns 0 on success.
1341  */
1342 int clk_save_context(void)
1343 {
1344 	struct clk_core *clk;
1345 	int ret;
1346 
1347 	hlist_for_each_entry(clk, &clk_root_list, child_node) {
1348 		ret = clk_core_save_context(clk);
1349 		if (ret < 0)
1350 			return ret;
1351 	}
1352 
1353 	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1354 		ret = clk_core_save_context(clk);
1355 		if (ret < 0)
1356 			return ret;
1357 	}
1358 
1359 	return 0;
1360 }
1361 EXPORT_SYMBOL_GPL(clk_save_context);
1362 
1363 /**
1364  * clk_restore_context - restore clock context after poweroff
1365  *
1366  * Restore the saved clock context upon resume.
1367  *
1368  */
1369 void clk_restore_context(void)
1370 {
1371 	struct clk_core *core;
1372 
1373 	hlist_for_each_entry(core, &clk_root_list, child_node)
1374 		clk_core_restore_context(core);
1375 
1376 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1377 		clk_core_restore_context(core);
1378 }
1379 EXPORT_SYMBOL_GPL(clk_restore_context);
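
/*
 * Illustrative platform-code sketch (hypothetical hooks): save the clock
 * context late in the suspend path and replay it early in resume:
 *
 *	static int my_platform_suspend_late(void)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void my_platform_resume_early(void)
 *	{
 *		clk_restore_context();
 *	}
 */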
1380 
1381 /**
1382  * clk_enable - ungate a clock
1383  * @clk: the clk being ungated
1384  *
1385  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
1386  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1387  * if the operation will never sleep.  One example is a SoC-internal clk which
1388  * is controlled via simple register writes.  In the complex case a clk ungate
1389  * operation may require a fast and a slow part.  It is for this reason that
1390  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
1391  * must be called before clk_enable.  Returns 0 on success, -EERROR
1392  * otherwise.
1393  */
1394 int clk_enable(struct clk *clk)
1395 {
1396 	if (!clk)
1397 		return 0;
1398 
1399 	return clk_core_enable_lock(clk->core);
1400 }
1401 EXPORT_SYMBOL_GPL(clk_enable);
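
/*
 * Illustrative consumer-side sketch: the canonical pairing of the sleepable
 * and atomic halves, assuming a valid clk from clk_get():
 *
 *	ret = clk_prepare(clk);		(may sleep)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		(must not sleep)
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */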
1402 
1403 /**
1404  * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
1405  * @clk: clock source
1406  *
1407  * Returns true if clk_prepare() implicitly enables the clock, effectively
1408  * making clk_enable()/clk_disable() no-ops, false otherwise.
1409  *
1410  * This is of interest mainly to power management code where actually
1411  * disabling the clock also requires unpreparing it to have any material
1412  * effect.
1413  *
1414  * Regardless of the value returned here, the caller must always invoke
1415  * clk_enable() or clk_prepare_enable() and their counterparts for the usage
1416  * counts to be right.
1417  */
1418 bool clk_is_enabled_when_prepared(struct clk *clk)
1419 {
1420 	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
1421 }
1422 EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
1423 
1424 static int clk_core_prepare_enable(struct clk_core *core)
1425 {
1426 	int ret;
1427 
1428 	ret = clk_core_prepare_lock(core);
1429 	if (ret)
1430 		return ret;
1431 
1432 	ret = clk_core_enable_lock(core);
1433 	if (ret)
1434 		clk_core_unprepare_lock(core);
1435 
1436 	return ret;
1437 }
1438 
1439 static void clk_core_disable_unprepare(struct clk_core *core)
1440 {
1441 	clk_core_disable_lock(core);
1442 	clk_core_unprepare_lock(core);
1443 }
1444 
1445 static void __init clk_unprepare_unused_subtree(struct clk_core *core)
1446 {
1447 	struct clk_core *child;
1448 
1449 	lockdep_assert_held(&prepare_lock);
1450 
1451 	hlist_for_each_entry(child, &core->children, child_node)
1452 		clk_unprepare_unused_subtree(child);
1453 
1454 	if (core->prepare_count)
1455 		return;
1456 
1457 	if (core->flags & CLK_IGNORE_UNUSED)
1458 		return;
1459 
1460 	if (clk_core_is_prepared(core)) {
1461 		trace_clk_unprepare(core);
1462 		if (core->ops->unprepare_unused)
1463 			core->ops->unprepare_unused(core->hw);
1464 		else if (core->ops->unprepare)
1465 			core->ops->unprepare(core->hw);
1466 		trace_clk_unprepare_complete(core);
1467 	}
1468 }
1469 
1470 static void __init clk_disable_unused_subtree(struct clk_core *core)
1471 {
1472 	struct clk_core *child;
1473 	unsigned long flags;
1474 
1475 	lockdep_assert_held(&prepare_lock);
1476 
1477 	hlist_for_each_entry(child, &core->children, child_node)
1478 		clk_disable_unused_subtree(child);
1479 
1480 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1481 		clk_core_prepare_enable(core->parent);
1482 
1483 	flags = clk_enable_lock();
1484 
1485 	if (core->enable_count)
1486 		goto unlock_out;
1487 
1488 	if (core->flags & CLK_IGNORE_UNUSED)
1489 		goto unlock_out;
1490 
1491 	/*
1492 	 * some gate clocks have special needs during the disable-unused
1493 	 * sequence.  call .disable_unused if available, otherwise fall
1494 	 * back to .disable
1495 	 */
1496 	if (clk_core_is_enabled(core)) {
1497 		trace_clk_disable(core);
1498 		if (core->ops->disable_unused)
1499 			core->ops->disable_unused(core->hw);
1500 		else if (core->ops->disable)
1501 			core->ops->disable(core->hw);
1502 		trace_clk_disable_complete(core);
1503 	}
1504 
1505 unlock_out:
1506 	clk_enable_unlock(flags);
1507 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1508 		clk_core_disable_unprepare(core->parent);
1509 }
1510 
1511 static bool clk_ignore_unused __initdata;
1512 static int __init clk_ignore_unused_setup(char *__unused)
1513 {
1514 	clk_ignore_unused = true;
1515 	return 1;
1516 }
1517 __setup("clk_ignore_unused", clk_ignore_unused_setup);
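
/*
 * Example: booting with "clk_ignore_unused" on the kernel command line skips
 * the late-initcall gating below, which can help when debugging a driver
 * that uses a clock without claiming it.
 */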
1518 
1519 static int __init clk_disable_unused(void)
1520 {
1521 	struct clk_core *core;
1522 	int ret;
1523 
1524 	if (clk_ignore_unused) {
1525 		pr_warn("clk: Not disabling unused clocks\n");
1526 		return 0;
1527 	}
1528 
1529 	pr_info("clk: Disabling unused clocks\n");
1530 
1531 	ret = clk_pm_runtime_get_all();
1532 	if (ret)
1533 		return ret;
1534 	/*
1535 	 * Grab the prepare lock to keep the clk topology stable while iterating
1536 	 * over clks.
1537 	 */
1538 	clk_prepare_lock();
1539 
1540 	hlist_for_each_entry(core, &clk_root_list, child_node)
1541 		clk_disable_unused_subtree(core);
1542 
1543 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1544 		clk_disable_unused_subtree(core);
1545 
1546 	hlist_for_each_entry(core, &clk_root_list, child_node)
1547 		clk_unprepare_unused_subtree(core);
1548 
1549 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1550 		clk_unprepare_unused_subtree(core);
1551 
1552 	clk_prepare_unlock();
1553 
1554 	clk_pm_runtime_put_all();
1555 
1556 	return 0;
1557 }
1558 late_initcall_sync(clk_disable_unused);
1559 
1560 static int clk_core_determine_round_nolock(struct clk_core *core,
1561 					   struct clk_rate_request *req)
1562 {
1563 	lockdep_assert_held(&prepare_lock);
1564 
1565 	if (!core)
1566 		return 0;
1567 
1568 	/*
1569 	 * Some clock providers hand-craft their clk_rate_requests and
1570 	 * might not fill min_rate and max_rate.
1571 	 *
1572 	 * If that is the case, clamping the rate is equivalent to setting
1573 	 * the rate to 0 which is bad. Skip the clamping but complain so
1574 	 * that it gets fixed, hopefully.
1575 	 */
1576 	if (!req->min_rate && !req->max_rate)
1577 		pr_warn("%s: %s: clk_rate_request has uninitialized min and max rates\n",
1578 			__func__, core->name);
1579 	else
1580 		req->rate = clamp(req->rate, req->min_rate, req->max_rate);
1581 
1582 	/*
1583 	 * At this point, core protection will be disabled
1584 	 * - if the provider is not protected at all
1585 	 * - if the calling consumer is the only one which has exclusivity
1586 	 *   over the provider
1587 	 */
1588 	if (clk_core_rate_is_protected(core)) {
1589 		req->rate = core->rate;
1590 	} else if (core->ops->determine_rate) {
1591 		return core->ops->determine_rate(core->hw, req);
1592 	} else {
1593 		return -EINVAL;
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static void clk_core_init_rate_req(struct clk_core * const core,
1600 				   struct clk_rate_request *req,
1601 				   unsigned long rate)
1602 {
1603 	struct clk_core *parent;
1604 
1605 	if (WARN_ON(!req))
1606 		return;
1607 
1608 	memset(req, 0, sizeof(*req));
1609 	req->max_rate = ULONG_MAX;
1610 
1611 	if (!core)
1612 		return;
1613 
1614 	req->core = core;
1615 	req->rate = rate;
1616 	clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
1617 
1618 	parent = core->parent;
1619 	if (parent) {
1620 		req->best_parent_hw = parent->hw;
1621 		req->best_parent_rate = parent->rate;
1622 	} else {
1623 		req->best_parent_hw = NULL;
1624 		req->best_parent_rate = 0;
1625 	}
1626 }
1627 
1628 /**
1629  * clk_hw_init_rate_request - Initializes a clk_rate_request
1630  * @hw: the clk for which we want to submit a rate request
1631  * @req: the clk_rate_request structure we want to initialise
1632  * @rate: the rate which is to be requested
1633  *
1634  * Initializes a clk_rate_request structure to submit to
1635  * __clk_determine_rate() or similar functions.
1636  */
1637 void clk_hw_init_rate_request(const struct clk_hw *hw,
1638 			      struct clk_rate_request *req,
1639 			      unsigned long rate)
1640 {
1641 	if (WARN_ON(!hw || !req))
1642 		return;
1643 
1644 	clk_core_init_rate_req(hw->core, req, rate);
1645 }
1646 EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
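
/*
 * Illustrative provider-side sketch (caller must hold the prepare_lock, e.g.
 * from within a clk_ops callback): probe what a 100 MHz request would
 * resolve to:
 *
 *	struct clk_rate_request req;
 *
 *	clk_hw_init_rate_request(hw, &req, 100000000);
 *	if (!__clk_determine_rate(hw, &req))
 *		pr_debug("would run at %lu Hz\n", req.rate);
 */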
1647 
1648 /**
1649  * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
1650  * @hw: the original clock that got the rate request
1651  * @old_req: the original clk_rate_request structure we want to forward
1652  * @parent: the clk we want to forward @old_req to
1653  * @req: the clk_rate_request structure we want to initialise
1654  * @parent_rate: The rate which is to be requested to @parent
1655  *
1656  * Initializes a clk_rate_request structure to submit to a clock parent
1657  * in __clk_determine_rate() or similar functions.
1658  */
1659 void clk_hw_forward_rate_request(const struct clk_hw *hw,
1660 				 const struct clk_rate_request *old_req,
1661 				 const struct clk_hw *parent,
1662 				 struct clk_rate_request *req,
1663 				 unsigned long parent_rate)
1664 {
1665 	if (WARN_ON(!hw || !old_req || !parent || !req))
1666 		return;
1667 
1668 	clk_core_forward_rate_req(hw->core, old_req,
1669 				  parent->core, req,
1670 				  parent_rate);
1671 }
1672 EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
1673 
1674 static bool clk_core_can_round(struct clk_core * const core)
1675 {
1676 	return core->ops->determine_rate;
1677 }
1678 
1679 static int clk_core_round_rate_nolock(struct clk_core *core,
1680 				      struct clk_rate_request *req)
1681 {
1682 	int ret;
1683 
1684 	lockdep_assert_held(&prepare_lock);
1685 
1686 	if (!core) {
1687 		req->rate = 0;
1688 		return 0;
1689 	}
1690 
1691 	if (clk_core_can_round(core))
1692 		return clk_core_determine_round_nolock(core, req);
1693 
1694 	if (core->flags & CLK_SET_RATE_PARENT) {
1695 		struct clk_rate_request parent_req;
1696 
1697 		clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
1698 
1699 		trace_clk_rate_request_start(&parent_req);
1700 
1701 		ret = clk_core_round_rate_nolock(core->parent, &parent_req);
1702 		if (ret)
1703 			return ret;
1704 
1705 		trace_clk_rate_request_done(&parent_req);
1706 
1707 		req->best_parent_rate = parent_req.rate;
1708 		req->rate = parent_req.rate;
1709 
1710 		return 0;
1711 	}
1712 
1713 	req->rate = core->rate;
1714 	return 0;
1715 }
1716 
1717 /**
1718  * __clk_determine_rate - get the closest rate actually supported by a clock
1719  * @hw: determine the rate of this clock
1720  * @req: target rate request
1721  *
1722  * Useful for clk_ops such as .set_rate and .determine_rate.
1723  */
1724 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1725 {
1726 	if (!hw) {
1727 		req->rate = 0;
1728 		return 0;
1729 	}
1730 
1731 	return clk_core_round_rate_nolock(hw->core, req);
1732 }
1733 EXPORT_SYMBOL_GPL(__clk_determine_rate);
1734 
1735 /**
1736  * clk_hw_round_rate() - round the given rate for a hw clk
1737  * @hw: the hw clk for which we are rounding a rate
1738  * @rate: the rate which is to be rounded
1739  *
1740  * Takes in a rate as input and rounds it to a rate that the clk can actually
1741  * use.
1742  *
1743  * Context: prepare_lock must be held.
1744  *          For clk providers to call from within clk_ops such as
1745  *          .determine_rate.
1746  *
1747  * Return: returns rounded rate of hw clk if clk supports determine_rate
1748  *         operation; else returns the parent rate.
1749  */
1750 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1751 {
1752 	int ret;
1753 	struct clk_rate_request req;
1754 
1755 	clk_core_init_rate_req(hw->core, &req, rate);
1756 
1757 	trace_clk_rate_request_start(&req);
1758 
1759 	ret = clk_core_round_rate_nolock(hw->core, &req);
1760 	if (ret)
1761 		return 0;
1762 
1763 	trace_clk_rate_request_done(&req);
1764 
1765 	return req.rate;
1766 }
1767 EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1768 
1769 /**
1770  * clk_round_rate - round the given rate for a clk
1771  * @clk: the clk for which we are rounding a rate
1772  * @rate: the rate which is to be rounded
1773  *
1774  * Takes in a rate as input and rounds it to a rate that the clk can actually
1775  * use which is then returned.  If clk doesn't support round_rate operation
1776  * then the parent rate is returned.
1777  */
1778 long clk_round_rate(struct clk *clk, unsigned long rate)
1779 {
1780 	struct clk_rate_request req;
1781 	int ret;
1782 
1783 	if (!clk)
1784 		return 0;
1785 
1786 	clk_prepare_lock();
1787 
1788 	if (clk->exclusive_count)
1789 		clk_core_rate_unprotect(clk->core);
1790 
1791 	clk_core_init_rate_req(clk->core, &req, rate);
1792 
1793 	trace_clk_rate_request_start(&req);
1794 
1795 	ret = clk_core_round_rate_nolock(clk->core, &req);
1796 
1797 	trace_clk_rate_request_done(&req);
1798 
1799 	if (clk->exclusive_count)
1800 		clk_core_rate_protect(clk->core);
1801 
1802 	clk_prepare_unlock();
1803 
1804 	if (ret)
1805 		return ret;
1806 
1807 	return req.rate;
1808 }
1809 EXPORT_SYMBOL_GPL(clk_round_rate);
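
/*
 * Illustrative consumer-side sketch: check what the clk would actually run
 * at before committing to it (clk_set_rate() is the consumer API declared
 * in linux/clk.h):
 *
 *	long rounded = clk_round_rate(clk, 74250000);
 *
 *	if (rounded == 74250000)
 *		ret = clk_set_rate(clk, rounded);
 */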
1810 
1811 /**
1812  * __clk_notify - call clk notifier chain
1813  * @core: clk that is changing rate
1814  * @msg: clk notifier type (see include/linux/clk.h)
1815  * @old_rate: old clk rate
1816  * @new_rate: new clk rate
1817  *
1818  * Triggers a notifier call chain on the clk rate-change notification
1819  * for 'clk'.  Passes a pointer to the struct clk and the previous
1820  * and current rates to the notifier callback.  Intended to be called by
1821  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1822  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1823  * a driver returns that.
1824  */
1825 static int __clk_notify(struct clk_core *core, unsigned long msg,
1826 		unsigned long old_rate, unsigned long new_rate)
1827 {
1828 	struct clk_notifier *cn;
1829 	struct clk_notifier_data cnd;
1830 	int ret = NOTIFY_DONE;
1831 
1832 	cnd.old_rate = old_rate;
1833 	cnd.new_rate = new_rate;
1834 
1835 	list_for_each_entry(cn, &clk_notifier_list, node) {
1836 		if (cn->clk->core == core) {
1837 			cnd.clk = cn->clk;
1838 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1839 					&cnd);
1840 			if (ret & NOTIFY_STOP_MASK)
1841 				return ret;
1842 		}
1843 	}
1844 
1845 	return ret;
1846 }
1847 
1848 /**
1849  * __clk_recalc_accuracies - recalculate accuracies of a subtree of clks
1850  * @core: first clk in the subtree
1851  *
1852  * Walks the subtree of clks starting with clk and recalculates accuracies as
1853  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1854  * callback then it is assumed that the clock will take on the accuracy of its
1855  * parent.
1856  */
1857 static void __clk_recalc_accuracies(struct clk_core *core)
1858 {
1859 	unsigned long parent_accuracy = 0;
1860 	struct clk_core *child;
1861 
1862 	lockdep_assert_held(&prepare_lock);
1863 
1864 	if (core->parent)
1865 		parent_accuracy = core->parent->accuracy;
1866 
1867 	if (core->ops->recalc_accuracy)
1868 		core->accuracy = core->ops->recalc_accuracy(core->hw,
1869 							  parent_accuracy);
1870 	else
1871 		core->accuracy = parent_accuracy;
1872 
1873 	hlist_for_each_entry(child, &core->children, child_node)
1874 		__clk_recalc_accuracies(child);
1875 }
1876 
1877 static long clk_core_get_accuracy_recalc(struct clk_core *core)
1878 {
1879 	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1880 		__clk_recalc_accuracies(core);
1881 
1882 	return clk_core_get_accuracy_no_lock(core);
1883 }
1884 
1885 /**
1886  * clk_get_accuracy - return the accuracy of clk
1887  * @clk: the clk whose accuracy is being returned
1888  *
1889  * Simply returns the cached accuracy of the clk, unless
1890  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1891  * issued.
1892  * If clk is NULL then returns 0.
1893  */
1894 long clk_get_accuracy(struct clk *clk)
1895 {
1896 	long accuracy;
1897 
1898 	if (!clk)
1899 		return 0;
1900 
1901 	clk_prepare_lock();
1902 	accuracy = clk_core_get_accuracy_recalc(clk->core);
1903 	clk_prepare_unlock();
1904 
1905 	return accuracy;
1906 }
1907 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1908 
1909 static unsigned long clk_recalc(struct clk_core *core,
1910 				unsigned long parent_rate)
1911 {
1912 	unsigned long rate = parent_rate;
1913 
1914 	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1915 		rate = core->ops->recalc_rate(core->hw, parent_rate);
1916 		clk_pm_runtime_put(core);
1917 	}
1918 	return rate;
1919 }
1920 
1921 /**
1922  * __clk_recalc_rates - recalculate rates of a subtree of clks
1923  * @core: first clk in the subtree
1924  * @update_req: Whether req_rate should be updated with the new rate
1925  * @msg: notification type (see include/linux/clk.h)
1926  *
1927  * Walks the subtree of clks starting with clk and recalculates rates as it
1928  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1929  * it is assumed that the clock will take on the rate of its parent.
1930  *
1931  * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1932  * if necessary.
1933  */
1934 static void __clk_recalc_rates(struct clk_core *core, bool update_req,
1935 			       unsigned long msg)
1936 {
1937 	unsigned long old_rate;
1938 	unsigned long parent_rate = 0;
1939 	struct clk_core *child;
1940 
1941 	lockdep_assert_held(&prepare_lock);
1942 
1943 	old_rate = core->rate;
1944 
1945 	if (core->parent)
1946 		parent_rate = core->parent->rate;
1947 
1948 	core->rate = clk_recalc(core, parent_rate);
1949 	if (update_req)
1950 		core->req_rate = core->rate;
1951 
1952 	/*
1953 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1954 	 * & ABORT_RATE_CHANGE notifiers
1955 	 */
1956 	if (core->notifier_count && msg)
1957 		__clk_notify(core, msg, old_rate, core->rate);
1958 
1959 	hlist_for_each_entry(child, &core->children, child_node)
1960 		__clk_recalc_rates(child, update_req, msg);
1961 }
1962 
1963 static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
1964 {
1965 	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1966 		__clk_recalc_rates(core, false, 0);
1967 
1968 	return clk_core_get_rate_nolock(core);
1969 }
1970 
1971 /**
1972  * clk_get_rate - return the rate of clk
1973  * @clk: the clk whose rate is being returned
1974  *
1975  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1976  * is set, which means a recalc_rate will be issued. Can be called regardless of
1977  * the clock enabledness. If clk is NULL, or if an error occurred, then returns
1978  * 0.
1979  */
1980 unsigned long clk_get_rate(struct clk *clk)
1981 {
1982 	unsigned long rate;
1983 
1984 	if (!clk)
1985 		return 0;
1986 
1987 	clk_prepare_lock();
1988 	rate = clk_core_get_rate_recalc(clk->core);
1989 	clk_prepare_unlock();
1990 
1991 	return rate;
1992 }
1993 EXPORT_SYMBOL_GPL(clk_get_rate);
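
/*
 * Illustrative consumer sketch (editor's addition): computing a divider from
 * the cached rate. The uart_clk handle, baud value and UART_DIV register are
 * hypothetical; clk_get_rate() may be called whether or not the clock is
 * enabled.
 *
 *	unsigned long rate = clk_get_rate(uart_clk);
 *
 *	if (!rate)
 *		return -EINVAL;
 *	writel(DIV_ROUND_CLOSEST(rate, 16 * baud), base + UART_DIV);
 */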
1994 
1995 static int clk_fetch_parent_index(struct clk_core *core,
1996 				  struct clk_core *parent)
1997 {
1998 	int i;
1999 
2000 	if (!parent)
2001 		return -EINVAL;
2002 
2003 	for (i = 0; i < core->num_parents; i++) {
2004 		/* Found it first try! */
2005 		if (core->parents[i].core == parent)
2006 			return i;
2007 
2008 		/* Something else is here, so keep looking */
2009 		if (core->parents[i].core)
2010 			continue;
2011 
2012 		/* Maybe core hasn't been cached but the hw is all we know? */
2013 		if (core->parents[i].hw) {
2014 			if (core->parents[i].hw == parent->hw)
2015 				break;
2016 
2017 			/* Didn't match, but we're expecting a clk_hw */
2018 			continue;
2019 		}
2020 
2021 		/* Maybe it hasn't been cached (clk_set_parent() path) */
2022 		if (parent == clk_core_get(core, i))
2023 			break;
2024 
2025 		/* Fallback to comparing globally unique names */
2026 		if (core->parents[i].name &&
2027 		    !strcmp(parent->name, core->parents[i].name))
2028 			break;
2029 	}
2030 
2031 	if (i == core->num_parents)
2032 		return -EINVAL;
2033 
2034 	core->parents[i].core = parent;
2035 	return i;
2036 }
2037 
2038 /**
2039  * clk_hw_get_parent_index - return the index of the parent clock
2040  * @hw: clk_hw associated with the clk being consumed
2041  *
2042  * Fetches and returns the index of the parent clock. Returns -EINVAL if the
2043  * clock does not have a current parent.
2044  */
2045 int clk_hw_get_parent_index(struct clk_hw *hw)
2046 {
2047 	struct clk_hw *parent = clk_hw_get_parent(hw);
2048 
2049 	if (WARN_ON(parent == NULL))
2050 		return -EINVAL;
2051 
2052 	return clk_fetch_parent_index(hw->core, parent->core);
2053 }
2054 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
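
/*
 * Illustrative provider sketch (editor's addition): using
 * clk_hw_get_parent_index() from a clk_ops callback to record the currently
 * selected mux input. The foo_mux wrapper and its saved_parent field are
 * hypothetical.
 *
 *	static int foo_mux_save_context(struct clk_hw *hw)
 *	{
 *		struct foo_mux *mux = container_of(hw, struct foo_mux, hw);
 *		int index = clk_hw_get_parent_index(hw);
 *
 *		if (index < 0)
 *			return index;
 *		mux->saved_parent = index;
 *		return 0;
 *	}
 */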
2055 
2056 /*
2057  * Update the orphan status of @core and all its children.
2058  */
2059 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
2060 {
2061 	struct clk_core *child;
2062 
2063 	core->orphan = is_orphan;
2064 
2065 	hlist_for_each_entry(child, &core->children, child_node)
2066 		clk_core_update_orphan_status(child, is_orphan);
2067 }
2068 
2069 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
2070 {
2071 	bool was_orphan = core->orphan;
2072 
2073 	hlist_del(&core->child_node);
2074 
2075 	if (new_parent) {
2076 		bool becomes_orphan = new_parent->orphan;
2077 
2078 		/* avoid duplicate POST_RATE_CHANGE notifications */
2079 		if (new_parent->new_child == core)
2080 			new_parent->new_child = NULL;
2081 
2082 		hlist_add_head(&core->child_node, &new_parent->children);
2083 
2084 		if (was_orphan != becomes_orphan)
2085 			clk_core_update_orphan_status(core, becomes_orphan);
2086 	} else {
2087 		hlist_add_head(&core->child_node, &clk_orphan_list);
2088 		if (!was_orphan)
2089 			clk_core_update_orphan_status(core, true);
2090 	}
2091 
2092 	core->parent = new_parent;
2093 }
2094 
2095 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
2096 					   struct clk_core *parent)
2097 {
2098 	unsigned long flags;
2099 	struct clk_core *old_parent = core->parent;
2100 
2101 	/*
2102 	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
2103 	 *
2104 	 * 2. Migrate prepare state between parents and prevent race with
2105 	 * clk_enable().
2106 	 *
2107 	 * If the clock is not prepared, then a race with
2108 	 * clk_enable/disable() is impossible since we already have the
2109 	 * prepare lock (future calls to clk_enable() need to be preceded by
2110 	 * a clk_prepare()).
2111 	 *
2112 	 * If the clock is prepared, migrate the prepared state to the new
2113 	 * parent and also protect against a race with clk_enable() by
2114 	 * forcing the clock and the new parent on.  This ensures that all
2115 	 * future calls to clk_enable() are practically NOPs with respect to
2116 	 * hardware and software states.
2117 	 *
2118 	 * See also: Comment for clk_set_parent() below.
2119 	 */
2120 
2121 	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
2122 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
2123 		clk_core_prepare_enable(old_parent);
2124 		clk_core_prepare_enable(parent);
2125 	}
2126 
2127 	/* migrate prepare count if > 0 */
2128 	if (core->prepare_count) {
2129 		clk_core_prepare_enable(parent);
2130 		clk_core_enable_lock(core);
2131 	}
2132 
2133 	/* update the clk tree topology */
2134 	flags = clk_enable_lock();
2135 	clk_reparent(core, parent);
2136 	clk_enable_unlock(flags);
2137 
2138 	return old_parent;
2139 }
2140 
2141 static void __clk_set_parent_after(struct clk_core *core,
2142 				   struct clk_core *parent,
2143 				   struct clk_core *old_parent)
2144 {
2145 	/*
2146 	 * Finish the migration of prepare state and undo the changes done
2147 	 * for preventing a race with clk_enable().
2148 	 */
2149 	if (core->prepare_count) {
2150 		clk_core_disable_lock(core);
2151 		clk_core_disable_unprepare(old_parent);
2152 	}
2153 
2154 	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
2155 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
2156 		clk_core_disable_unprepare(parent);
2157 		clk_core_disable_unprepare(old_parent);
2158 	}
2159 }
2160 
2161 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
2162 			    u8 p_index)
2163 {
2164 	unsigned long flags;
2165 	int ret = 0;
2166 	struct clk_core *old_parent;
2167 
2168 	old_parent = __clk_set_parent_before(core, parent);
2169 
2170 	trace_clk_set_parent(core, parent);
2171 
2172 	/* change clock input source */
2173 	if (parent && core->ops->set_parent)
2174 		ret = core->ops->set_parent(core->hw, p_index);
2175 
2176 	trace_clk_set_parent_complete(core, parent);
2177 
2178 	if (ret) {
2179 		flags = clk_enable_lock();
2180 		clk_reparent(core, old_parent);
2181 		clk_enable_unlock(flags);
2182 
2183 		__clk_set_parent_after(core, old_parent, parent);
2184 
2185 		return ret;
2186 	}
2187 
2188 	__clk_set_parent_after(core, parent, old_parent);
2189 
2190 	return 0;
2191 }
2192 
2193 /**
2194  * __clk_speculate_rates - speculate the rates of a subtree of clks
2195  * @core: first clk in the subtree
2196  * @parent_rate: the "future" rate of clk's parent
2197  *
2198  * Walks the subtree of clks starting with clk, speculating rates as it
2199  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
2200  *
2201  * Unlike __clk_recalc_rates, __clk_speculate_rates exists only for sending
2202  * pre-rate change notifications and returns early if no clks in the
2203  * subtree have subscribed to the notifications.  Note that if a clk does not
2204  * implement the .recalc_rate callback then it is assumed that the clock will
2205  * take on the rate of its parent.
2206  */
2207 static int __clk_speculate_rates(struct clk_core *core,
2208 				 unsigned long parent_rate)
2209 {
2210 	struct clk_core *child;
2211 	unsigned long new_rate;
2212 	int ret = NOTIFY_DONE;
2213 
2214 	lockdep_assert_held(&prepare_lock);
2215 
2216 	new_rate = clk_recalc(core, parent_rate);
2217 
2218 	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
2219 	if (core->notifier_count)
2220 		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
2221 
2222 	if (ret & NOTIFY_STOP_MASK) {
2223 		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
2224 				__func__, core->name, ret);
2225 		goto out;
2226 	}
2227 
2228 	hlist_for_each_entry(child, &core->children, child_node) {
2229 		ret = __clk_speculate_rates(child, new_rate);
2230 		if (ret & NOTIFY_STOP_MASK)
2231 			break;
2232 	}
2233 
2234 out:
2235 	return ret;
2236 }
2237 
2238 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
2239 			     struct clk_core *new_parent, u8 p_index)
2240 {
2241 	struct clk_core *child;
2242 
2243 	core->new_rate = new_rate;
2244 	core->new_parent = new_parent;
2245 	core->new_parent_index = p_index;
2246 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
2247 	core->new_child = NULL;
2248 	if (new_parent && new_parent != core->parent)
2249 		new_parent->new_child = core;
2250 
2251 	hlist_for_each_entry(child, &core->children, child_node) {
2252 		child->new_rate = clk_recalc(child, new_rate);
2253 		clk_calc_subtree(child, child->new_rate, NULL, 0);
2254 	}
2255 }
2256 
2257 /*
2258  * calculate the new rates returning the topmost clock that has to be
2259  * changed.
2260  */
2261 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
2262 					   unsigned long rate)
2263 {
2264 	struct clk_core *top = core;
2265 	struct clk_core *old_parent, *parent;
2266 	unsigned long best_parent_rate = 0;
2267 	unsigned long new_rate;
2268 	unsigned long min_rate;
2269 	unsigned long max_rate;
2270 	int p_index = 0;
2271 	int ret;
2272 
2273 	/* sanity */
2274 	if (IS_ERR_OR_NULL(core))
2275 		return NULL;
2276 
2277 	/* save parent rate, if it exists */
2278 	parent = old_parent = core->parent;
2279 	if (parent)
2280 		best_parent_rate = parent->rate;
2281 
2282 	clk_core_get_boundaries(core, &min_rate, &max_rate);
2283 
2284 	/* find the closest rate and parent clk/rate */
2285 	if (clk_core_can_round(core)) {
2286 		struct clk_rate_request req;
2287 
2288 		clk_core_init_rate_req(core, &req, rate);
2289 
2290 		trace_clk_rate_request_start(&req);
2291 
2292 		ret = clk_core_determine_round_nolock(core, &req);
2293 		if (ret < 0)
2294 			return NULL;
2295 
2296 		trace_clk_rate_request_done(&req);
2297 
2298 		best_parent_rate = req.best_parent_rate;
2299 		new_rate = req.rate;
2300 		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
2301 
2302 		if (new_rate < min_rate || new_rate > max_rate)
2303 			return NULL;
2304 	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
2305 		/* pass-through clock without adjustable parent */
2306 		core->new_rate = core->rate;
2307 		return NULL;
2308 	} else {
2309 		/* pass-through clock with adjustable parent */
2310 		top = clk_calc_new_rates(parent, rate);
2311 		new_rate = parent->new_rate;
2312 		goto out;
2313 	}
2314 
2315 	/* some clocks must be gated to change parent */
2316 	if (parent != old_parent &&
2317 	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2318 		pr_debug("%s: %s not gated but wants to reparent\n",
2319 			 __func__, core->name);
2320 		return NULL;
2321 	}
2322 
2323 	/* try finding the new parent index */
2324 	if (parent && core->num_parents > 1) {
2325 		p_index = clk_fetch_parent_index(core, parent);
2326 		if (p_index < 0) {
2327 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2328 				 __func__, parent->name, core->name);
2329 			return NULL;
2330 		}
2331 	}
2332 
2333 	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2334 	    best_parent_rate != parent->rate)
2335 		top = clk_calc_new_rates(parent, best_parent_rate);
2336 
2337 out:
2338 	clk_calc_subtree(core, new_rate, parent, p_index);
2339 
2340 	return top;
2341 }
2342 
2343 /*
2344  * Notify about rate changes in a subtree. Always walk down the whole tree
2345  * so that in case of an error we can walk down the whole tree again and
2346  * abort the change.
2347  */
2348 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2349 						  unsigned long event)
2350 {
2351 	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2352 	int ret = NOTIFY_DONE;
2353 
2354 	if (core->rate == core->new_rate)
2355 		return NULL;
2356 
2357 	if (core->notifier_count) {
2358 		ret = __clk_notify(core, event, core->rate, core->new_rate);
2359 		if (ret & NOTIFY_STOP_MASK)
2360 			fail_clk = core;
2361 	}
2362 
2363 	hlist_for_each_entry(child, &core->children, child_node) {
2364 		/* Skip children who will be reparented to another clock */
2365 		if (child->new_parent && child->new_parent != core)
2366 			continue;
2367 		tmp_clk = clk_propagate_rate_change(child, event);
2368 		if (tmp_clk)
2369 			fail_clk = tmp_clk;
2370 	}
2371 
2372 	/* handle the new child who might not be in core->children yet */
2373 	if (core->new_child) {
2374 		tmp_clk = clk_propagate_rate_change(core->new_child, event);
2375 		if (tmp_clk)
2376 			fail_clk = tmp_clk;
2377 	}
2378 
2379 	return fail_clk;
2380 }
2381 
2382 /*
2383  * walk down a subtree and set the new rates notifying the rate
2384  * change on the way
2385  */
2386 static void clk_change_rate(struct clk_core *core)
2387 {
2388 	struct clk_core *child;
2389 	struct hlist_node *tmp;
2390 	unsigned long old_rate;
2391 	unsigned long best_parent_rate = 0;
2392 	bool skip_set_rate = false;
2393 	struct clk_core *old_parent;
2394 	struct clk_core *parent = NULL;
2395 
2396 	old_rate = core->rate;
2397 
2398 	if (core->new_parent) {
2399 		parent = core->new_parent;
2400 		best_parent_rate = core->new_parent->rate;
2401 	} else if (core->parent) {
2402 		parent = core->parent;
2403 		best_parent_rate = core->parent->rate;
2404 	}
2405 
2406 	if (clk_pm_runtime_get(core))
2407 		return;
2408 
2409 	if (core->flags & CLK_SET_RATE_UNGATE) {
2410 		clk_core_prepare(core);
2411 		clk_core_enable_lock(core);
2412 	}
2413 
2414 	if (core->new_parent && core->new_parent != core->parent) {
2415 		old_parent = __clk_set_parent_before(core, core->new_parent);
2416 		trace_clk_set_parent(core, core->new_parent);
2417 
2418 		if (core->ops->set_rate_and_parent) {
2419 			skip_set_rate = true;
2420 			core->ops->set_rate_and_parent(core->hw, core->new_rate,
2421 					best_parent_rate,
2422 					core->new_parent_index);
2423 		} else if (core->ops->set_parent) {
2424 			core->ops->set_parent(core->hw, core->new_parent_index);
2425 		}
2426 
2427 		trace_clk_set_parent_complete(core, core->new_parent);
2428 		__clk_set_parent_after(core, core->new_parent, old_parent);
2429 	}
2430 
2431 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2432 		clk_core_prepare_enable(parent);
2433 
2434 	trace_clk_set_rate(core, core->new_rate);
2435 
2436 	if (!skip_set_rate && core->ops->set_rate)
2437 		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2438 
2439 	trace_clk_set_rate_complete(core, core->new_rate);
2440 
2441 	core->rate = clk_recalc(core, best_parent_rate);
2442 
2443 	if (core->flags & CLK_SET_RATE_UNGATE) {
2444 		clk_core_disable_lock(core);
2445 		clk_core_unprepare(core);
2446 	}
2447 
2448 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2449 		clk_core_disable_unprepare(parent);
2450 
2451 	if (core->notifier_count && old_rate != core->rate)
2452 		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2453 
2454 	if (core->flags & CLK_RECALC_NEW_RATES)
2455 		(void)clk_calc_new_rates(core, core->new_rate);
2456 
2457 	/*
2458 	 * Use safe iteration, as change_rate can actually swap parents
2459 	 * for certain clock types.
2460 	 */
2461 	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2462 		/* Skip children who will be reparented to another clock */
2463 		if (child->new_parent && child->new_parent != core)
2464 			continue;
2465 		clk_change_rate(child);
2466 	}
2467 
2468 	/* handle the new child who might not be in core->children yet */
2469 	if (core->new_child)
2470 		clk_change_rate(core->new_child);
2471 
2472 	clk_pm_runtime_put(core);
2473 }
2474 
2475 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2476 						     unsigned long req_rate)
2477 {
2478 	int ret, cnt;
2479 	struct clk_rate_request req;
2480 
2481 	lockdep_assert_held(&prepare_lock);
2482 
2483 	if (!core)
2484 		return 0;
2485 
2486 	/* simulate what the rate would be if it could be freely set */
2487 	cnt = clk_core_rate_nuke_protect(core);
2488 	if (cnt < 0)
2489 		return cnt;
2490 
2491 	clk_core_init_rate_req(core, &req, req_rate);
2492 
2493 	trace_clk_rate_request_start(&req);
2494 
2495 	ret = clk_core_round_rate_nolock(core, &req);
2496 
2497 	trace_clk_rate_request_done(&req);
2498 
2499 	/* restore the protection */
2500 	clk_core_rate_restore_protect(core, cnt);
2501 
2502 	return ret ? 0 : req.rate;
2503 }
2504 
2505 static int clk_core_set_rate_nolock(struct clk_core *core,
2506 				    unsigned long req_rate)
2507 {
2508 	struct clk_core *top, *fail_clk;
2509 	unsigned long rate;
2510 	int ret;
2511 
2512 	if (!core)
2513 		return 0;
2514 
2515 	rate = clk_core_req_round_rate_nolock(core, req_rate);
2516 
2517 	/* bail early if nothing to do */
2518 	if (rate == clk_core_get_rate_nolock(core))
2519 		return 0;
2520 
2521 	/* fail on a direct rate set of a protected provider */
2522 	if (clk_core_rate_is_protected(core))
2523 		return -EBUSY;
2524 
2525 	/* calculate new rates and get the topmost changed clock */
2526 	top = clk_calc_new_rates(core, req_rate);
2527 	if (!top)
2528 		return -EINVAL;
2529 
2530 	ret = clk_pm_runtime_get(core);
2531 	if (ret)
2532 		return ret;
2533 
2534 	/* notify that we are about to change rates */
2535 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2536 	if (fail_clk) {
2537 		pr_debug("%s: failed to set %s rate\n", __func__,
2538 				fail_clk->name);
2539 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2540 		ret = -EBUSY;
2541 		goto err;
2542 	}
2543 
2544 	/* change the rates */
2545 	clk_change_rate(top);
2546 
2547 	core->req_rate = req_rate;
2548 err:
2549 	clk_pm_runtime_put(core);
2550 
2551 	return ret;
2552 }
2553 
2554 /**
2555  * clk_set_rate - specify a new rate for clk
2556  * @clk: the clk whose rate is being changed
2557  * @rate: the new rate for clk
2558  *
2559  * In the simplest case clk_set_rate will only adjust the rate of clk.
2560  *
2561  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2562  * propagate up to clk's parent; whether or not this happens depends on the
2563  * outcome of clk's .determine_rate implementation. If req->best_parent_rate
2564  * is unchanged after calling .determine_rate then upstream parent propagation
2565  * is ignored.  If req->best_parent_rate comes back with a new rate for clk's
2566  * parent then we propagate up to clk's parent and set its rate. Upward
2567  * propagation will continue until either a clk does not support the
2568  * CLK_SET_RATE_PARENT flag or .determine_rate stops requesting changes to
2569  * clk's parent_rate.
2570  *
2571  * Rate changes are accomplished via tree traversal that also recalculates the
2572  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2573  *
2574  * Returns 0 on success, -EERROR otherwise.
2575  */
2576 int clk_set_rate(struct clk *clk, unsigned long rate)
2577 {
2578 	int ret;
2579 
2580 	if (!clk)
2581 		return 0;
2582 
2583 	/* prevent racing with updates to the clock topology */
2584 	clk_prepare_lock();
2585 
2586 	if (clk->exclusive_count)
2587 		clk_core_rate_unprotect(clk->core);
2588 
2589 	ret = clk_core_set_rate_nolock(clk->core, rate);
2590 
2591 	if (clk->exclusive_count)
2592 		clk_core_rate_protect(clk->core);
2593 
2594 	clk_prepare_unlock();
2595 
2596 	return ret;
2597 }
2598 EXPORT_SYMBOL_GPL(clk_set_rate);
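
/*
 * Illustrative consumer sketch (editor's addition): the common pattern of
 * setting a rate before preparing and enabling a clock. The "pix" connection
 * id and the 74.25 MHz target are hypothetical.
 *
 *	struct clk *pix = devm_clk_get(dev, "pix");
 *	int ret;
 *
 *	if (IS_ERR(pix))
 *		return PTR_ERR(pix);
 *	ret = clk_set_rate(pix, 74250000);
 *	if (ret)
 *		return ret;
 *	ret = clk_prepare_enable(pix);
 *	if (ret)
 *		return ret;
 */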
2599 
2600 /**
2601  * clk_set_rate_exclusive - specify a new rate and get exclusive control
2602  * @clk: the clk whose rate is being changed
2603  * @rate: the new rate for clk
2604  *
2605  * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2606  * within a critical section.
2607  *
2608  * This can be used initially to ensure that at least one consumer is
2609  * satisfied when several consumers are competing for exclusivity over the
2610  * same clock provider.
2611  *
2612  * The exclusivity is not applied if setting the rate failed.
2613  *
2614  * Calls to clk_rate_exclusive_get() should be balanced with calls to
2615  * clk_rate_exclusive_put().
2616  *
2617  * Returns 0 on success, -EERROR otherwise.
2618  */
2619 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2620 {
2621 	int ret;
2622 
2623 	if (!clk)
2624 		return 0;
2625 
2626 	/* prevent racing with updates to the clock topology */
2627 	clk_prepare_lock();
2628 
2629 	/*
2630 	 * The temporary protection removal is deliberately not done here.
2631 	 * This function is meant to be used instead of clk_rate_protect,
2632 	 * i.e. before the consumer code path protects the clock provider.
2633 	 */
2634 
2635 	ret = clk_core_set_rate_nolock(clk->core, rate);
2636 	if (!ret) {
2637 		clk_core_rate_protect(clk->core);
2638 		clk->exclusive_count++;
2639 	}
2640 
2641 	clk_prepare_unlock();
2642 
2643 	return ret;
2644 }
2645 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
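
/*
 * Illustrative consumer sketch (editor's addition): pinning an exact rate and
 * dropping the exclusivity once it no longer matters. The audio_clk handle
 * and rate are hypothetical; the clk_rate_exclusive_put() balances the
 * exclusive reference taken implicitly on success.
 *
 *	ret = clk_set_rate_exclusive(audio_clk, 24576000);
 *	if (ret)
 *		return ret;
 *	... rate-sensitive work ...
 *	clk_rate_exclusive_put(audio_clk);
 */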
2646 
2647 static int clk_set_rate_range_nolock(struct clk *clk,
2648 				     unsigned long min,
2649 				     unsigned long max)
2650 {
2651 	int ret = 0;
2652 	unsigned long old_min, old_max, rate;
2653 
2654 	lockdep_assert_held(&prepare_lock);
2655 
2656 	if (!clk)
2657 		return 0;
2658 
2659 	trace_clk_set_rate_range(clk->core, min, max);
2660 
2661 	if (min > max) {
2662 		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2663 		       __func__, clk->core->name, clk->dev_id, clk->con_id,
2664 		       min, max);
2665 		return -EINVAL;
2666 	}
2667 
2668 	if (clk->exclusive_count)
2669 		clk_core_rate_unprotect(clk->core);
2670 
2671 	/* Save the current values in case we need to rollback the change */
2672 	old_min = clk->min_rate;
2673 	old_max = clk->max_rate;
2674 	clk->min_rate = min;
2675 	clk->max_rate = max;
2676 
2677 	if (!clk_core_check_boundaries(clk->core, min, max)) {
2678 		ret = -EINVAL;
2679 		goto out;
2680 	}
2681 
2682 	rate = clk->core->req_rate;
2683 	if (clk->core->flags & CLK_GET_RATE_NOCACHE)
2684 		rate = clk_core_get_rate_recalc(clk->core);
2685 
2686 	/*
2687 	 * Since the boundaries have been changed, let's give the
2688 	 * opportunity to the provider to adjust the clock rate based on
2689 	 * the new boundaries.
2690 	 *
2691 	 * We also need to handle the case where the clock is currently
2692 	 * outside of the boundaries. Clamping the last requested rate
2693 	 * to the current minimum and maximum will also handle this.
2694 	 *
2695 	 * FIXME:
2696 	 * There is a catch. It may fail for the usual reasons (clock
2697 	 * broken, clock protected, etc) but also because:
2698 	 * - the determine_rate() callback does not really check for
2699 	 *   this corner case when determining the rate
2700 	 */
2701 	rate = clamp(rate, min, max);
2702 	ret = clk_core_set_rate_nolock(clk->core, rate);
2703 	if (ret) {
2704 		/* rollback the changes */
2705 		clk->min_rate = old_min;
2706 		clk->max_rate = old_max;
2707 	}
2708 
2709 out:
2710 	if (clk->exclusive_count)
2711 		clk_core_rate_protect(clk->core);
2712 
2713 	return ret;
2714 }
2715 
2716 /**
2717  * clk_set_rate_range - set a rate range for a clock source
2718  * @clk: clock source
2719  * @min: desired minimum clock rate in Hz, inclusive
2720  * @max: desired maximum clock rate in Hz, inclusive
2721  *
2722  * Return: 0 for success or negative errno on failure.
2723  */
2724 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2725 {
2726 	int ret;
2727 
2728 	if (!clk)
2729 		return 0;
2730 
2731 	clk_prepare_lock();
2732 
2733 	ret = clk_set_rate_range_nolock(clk, min, max);
2734 
2735 	clk_prepare_unlock();
2736 
2737 	return ret;
2738 }
2739 EXPORT_SYMBOL_GPL(clk_set_rate_range);
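
/*
 * Illustrative consumer sketch (editor's addition): constraining a clock to a
 * band instead of a single frequency, leaving the provider free to pick any
 * rate inside it. The gpu_clk handle and bounds are hypothetical.
 *
 *	ret = clk_set_rate_range(gpu_clk, 200000000, 600000000);
 *	if (ret)
 *		dev_err(dev, "cannot constrain gpu clock: %d\n", ret);
 */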
2740 
2741 /**
2742  * clk_set_min_rate - set a minimum clock rate for a clock source
2743  * @clk: clock source
2744  * @rate: desired minimum clock rate in Hz, inclusive
2745  *
2746  * Returns success (0) or negative errno.
2747  */
2748 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2749 {
2750 	if (!clk)
2751 		return 0;
2752 
2753 	trace_clk_set_min_rate(clk->core, rate);
2754 
2755 	return clk_set_rate_range(clk, rate, clk->max_rate);
2756 }
2757 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2758 
2759 /**
2760  * clk_set_max_rate - set a maximum clock rate for a clock source
2761  * @clk: clock source
2762  * @rate: desired maximum clock rate in Hz, inclusive
2763  *
2764  * Returns success (0) or negative errno.
2765  */
2766 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2767 {
2768 	if (!clk)
2769 		return 0;
2770 
2771 	trace_clk_set_max_rate(clk->core, rate);
2772 
2773 	return clk_set_rate_range(clk, clk->min_rate, rate);
2774 }
2775 EXPORT_SYMBOL_GPL(clk_set_max_rate);
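
/*
 * Illustrative sketch (editor's addition): since clk_set_min_rate() and
 * clk_set_max_rate() each adjust one boundary while keeping the other, a
 * temporary cap can be applied and lifted without disturbing the minimum.
 * The cpu_clk handle and values are hypothetical.
 *
 *	clk_set_max_rate(cpu_clk, 1200000000);	// throttle
 *	...
 *	clk_set_max_rate(cpu_clk, ULONG_MAX);	// remove the cap
 */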
2776 
2777 /**
2778  * clk_get_parent - return the parent of a clk
2779  * @clk: the clk whose parent gets returned
2780  *
2781  * Simply returns clk->parent.  Returns NULL if clk is NULL.
2782  */
2783 struct clk *clk_get_parent(struct clk *clk)
2784 {
2785 	struct clk *parent;
2786 
2787 	if (!clk)
2788 		return NULL;
2789 
2790 	clk_prepare_lock();
2791 	/* TODO: Create a per-user clk and change callers to call clk_put */
2792 	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2793 	clk_prepare_unlock();
2794 
2795 	return parent;
2796 }
2797 EXPORT_SYMBOL_GPL(clk_get_parent);
2798 
2799 static struct clk_core *__clk_init_parent(struct clk_core *core)
2800 {
2801 	u8 index = 0;
2802 
2803 	if (core->num_parents > 1 && core->ops->get_parent)
2804 		index = core->ops->get_parent(core->hw);
2805 
2806 	return clk_core_get_parent_by_index(core, index);
2807 }
2808 
2809 static void clk_core_reparent(struct clk_core *core,
2810 				  struct clk_core *new_parent)
2811 {
2812 	clk_reparent(core, new_parent);
2813 	__clk_recalc_accuracies(core);
2814 	__clk_recalc_rates(core, true, POST_RATE_CHANGE);
2815 }
2816 
2817 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2818 {
2819 	if (!hw)
2820 		return;
2821 
2822 	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2823 }
2824 
2825 /**
2826  * clk_has_parent - check if a clock is a possible parent for another
2827  * @clk: clock source
2828  * @parent: parent clock source
2829  *
2830  * This function can be used in drivers that need to check that a clock can be
2831  * the parent of another without actually changing the parent.
2832  *
2833  * Returns true if @parent is a possible parent for @clk, false otherwise.
2834  */
2835 bool clk_has_parent(const struct clk *clk, const struct clk *parent)
2836 {
2837 	/* NULL clocks should be nops, so return success if either is NULL. */
2838 	if (!clk || !parent)
2839 		return true;
2840 
2841 	return clk_core_has_parent(clk->core, parent->core);
2842 }
2843 EXPORT_SYMBOL_GPL(clk_has_parent);
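
/*
 * Illustrative consumer sketch (editor's addition): validating a mux input
 * before switching to it. The mux_clk and pll_clk handles are hypothetical.
 *
 *	if (!clk_has_parent(mux_clk, pll_clk))
 *		return -EINVAL;
 *	ret = clk_set_parent(mux_clk, pll_clk);
 */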
2844 
2845 static int clk_core_set_parent_nolock(struct clk_core *core,
2846 				      struct clk_core *parent)
2847 {
2848 	int ret = 0;
2849 	int p_index = 0;
2850 	unsigned long p_rate = 0;
2851 
2852 	lockdep_assert_held(&prepare_lock);
2853 
2854 	if (!core)
2855 		return 0;
2856 
2857 	if (core->parent == parent)
2858 		return 0;
2859 
2860 	/* verify ops for multi-parent clks */
2861 	if (core->num_parents > 1 && !core->ops->set_parent)
2862 		return -EPERM;
2863 
2864 	/* check that we are allowed to re-parent if the clock is in use */
2865 	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2866 		return -EBUSY;
2867 
2868 	if (clk_core_rate_is_protected(core))
2869 		return -EBUSY;
2870 
2871 	/* try finding the new parent index */
2872 	if (parent) {
2873 		p_index = clk_fetch_parent_index(core, parent);
2874 		if (p_index < 0) {
2875 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2876 					__func__, parent->name, core->name);
2877 			return p_index;
2878 		}
2879 		p_rate = parent->rate;
2880 	}
2881 
2882 	ret = clk_pm_runtime_get(core);
2883 	if (ret)
2884 		return ret;
2885 
2886 	/* propagate PRE_RATE_CHANGE notifications */
2887 	ret = __clk_speculate_rates(core, p_rate);
2888 
2889 	/* abort if a driver objects */
2890 	if (ret & NOTIFY_STOP_MASK)
2891 		goto runtime_put;
2892 
2893 	/* do the re-parent */
2894 	ret = __clk_set_parent(core, parent, p_index);
2895 
2896 	/* propagate rate and accuracy recalculation accordingly */
2897 	if (ret) {
2898 		__clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
2899 	} else {
2900 		__clk_recalc_rates(core, true, POST_RATE_CHANGE);
2901 		__clk_recalc_accuracies(core);
2902 	}
2903 
2904 runtime_put:
2905 	clk_pm_runtime_put(core);
2906 
2907 	return ret;
2908 }
2909 
2910 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2911 {
2912 	return clk_core_set_parent_nolock(hw->core, parent->core);
2913 }
2914 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2915 
2916 /**
2917  * clk_set_parent - switch the parent of a mux clk
2918  * @clk: the mux clk whose input we are switching
2919  * @parent: the new input to clk
2920  *
2921  * Re-parent clk to use parent as its new input source.  If clk is in
2922  * prepared state, the clk will get enabled for the duration of this call. If
2923  * that's not acceptable for a specific clk (e.g. the consumer can't handle
2924  * that, the reparenting is glitchy in hardware, etc), use the
2925  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2926  *
2927  * After successfully changing clk's parent clk_set_parent will update the
2928  * clk topology, sysfs topology and propagate rate recalculation via
2929  * __clk_recalc_rates.
2930  *
2931  * Returns 0 on success, -EERROR otherwise.
2932  */
2933 int clk_set_parent(struct clk *clk, struct clk *parent)
2934 {
2935 	int ret;
2936 
2937 	if (!clk)
2938 		return 0;
2939 
2940 	clk_prepare_lock();
2941 
2942 	if (clk->exclusive_count)
2943 		clk_core_rate_unprotect(clk->core);
2944 
2945 	ret = clk_core_set_parent_nolock(clk->core,
2946 					 parent ? parent->core : NULL);
2947 
2948 	if (clk->exclusive_count)
2949 		clk_core_rate_protect(clk->core);
2950 
2951 	clk_prepare_unlock();
2952 
2953 	return ret;
2954 }
2955 EXPORT_SYMBOL_GPL(clk_set_parent);
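
/*
 * Illustrative consumer sketch (editor's addition): reparenting a mux to a
 * slower source, done while the clk is unprepared so that it also works for
 * muxes flagged with CLK_SET_PARENT_GATE. The handles are hypothetical.
 *
 *	clk_disable_unprepare(mux_clk);
 *	ret = clk_set_parent(mux_clk, xtal_clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_prepare_enable(mux_clk);
 */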
2956 
2957 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2958 {
2959 	int ret = -EINVAL;
2960 
2961 	lockdep_assert_held(&prepare_lock);
2962 
2963 	if (!core)
2964 		return 0;
2965 
2966 	if (clk_core_rate_is_protected(core))
2967 		return -EBUSY;
2968 
2969 	trace_clk_set_phase(core, degrees);
2970 
2971 	if (core->ops->set_phase) {
2972 		ret = core->ops->set_phase(core->hw, degrees);
2973 		if (!ret)
2974 			core->phase = degrees;
2975 	}
2976 
2977 	trace_clk_set_phase_complete(core, degrees);
2978 
2979 	return ret;
2980 }
2981 
2982 /**
2983  * clk_set_phase - adjust the phase shift of a clock signal
2984  * @clk: clock signal source
2985  * @degrees: number of degrees the signal is shifted
2986  *
2987  * Shifts the phase of a clock signal by the specified
2988  * degrees. Returns 0 on success, -EERROR otherwise.
2989  *
2990  * This function makes no distinction about the input or reference
2991  * signal that we adjust the clock signal phase against. For example,
2992  * with phase-locked loop clock signal generators we may shift phase with
2993  * respect to the feedback clock signal input, but for other cases the
2994  * clock phase may be shifted with respect to some other, unspecified
2995  * signal.
2996  *
2997  * Additionally the concept of phase shift does not propagate through
2998  * the clock tree hierarchy, which sets it apart from clock rates and
2999  * clock accuracy. A parent clock phase attribute does not have an
3000  * impact on the phase attribute of a child clock.
3001  */
3002 int clk_set_phase(struct clk *clk, int degrees)
3003 {
3004 	int ret;
3005 
3006 	if (!clk)
3007 		return 0;
3008 
3009 	/* sanity check degrees */
3010 	degrees %= 360;
3011 	if (degrees < 0)
3012 		degrees += 360;
3013 
3014 	clk_prepare_lock();
3015 
3016 	if (clk->exclusive_count)
3017 		clk_core_rate_unprotect(clk->core);
3018 
3019 	ret = clk_core_set_phase_nolock(clk->core, degrees);
3020 
3021 	if (clk->exclusive_count)
3022 		clk_core_rate_protect(clk->core);
3023 
3024 	clk_prepare_unlock();
3025 
3026 	return ret;
3027 }
3028 EXPORT_SYMBOL_GPL(clk_set_phase);
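
/*
 * Illustrative consumer sketch (editor's addition): shifting a sampling
 * clock by 90 degrees and reading back what the provider actually achieved.
 * The sample_clk handle is hypothetical.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (ret)
 *		return ret;
 *	phase = clk_get_phase(sample_clk);
 *	if (phase >= 0)
 *		dev_dbg(dev, "sample clock phase: %d deg\n", phase);
 */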
3029 
3030 static int clk_core_get_phase(struct clk_core *core)
3031 {
3032 	int ret;
3033 
3034 	lockdep_assert_held(&prepare_lock);
3035 	if (!core->ops->get_phase)
3036 		return 0;
3037 
3038 	/* Always try to update cached phase if possible */
3039 	ret = core->ops->get_phase(core->hw);
3040 	if (ret >= 0)
3041 		core->phase = ret;
3042 
3043 	return ret;
3044 }
3045 
3046 /**
3047  * clk_get_phase - return the phase shift of a clock signal
3048  * @clk: clock signal source
3049  *
3050  * Returns the phase shift of a clock node in degrees, otherwise returns
3051  * -EERROR.
3052  */
3053 int clk_get_phase(struct clk *clk)
3054 {
3055 	int ret;
3056 
3057 	if (!clk)
3058 		return 0;
3059 
3060 	clk_prepare_lock();
3061 	ret = clk_core_get_phase(clk->core);
3062 	clk_prepare_unlock();
3063 
3064 	return ret;
3065 }
3066 EXPORT_SYMBOL_GPL(clk_get_phase);
3067 
3068 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
3069 {
3070 	/* Assume a default value of 50% */
3071 	core->duty.num = 1;
3072 	core->duty.den = 2;
3073 }
3074 
3075 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
3076 
3077 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
3078 {
3079 	struct clk_duty *duty = &core->duty;
3080 	int ret = 0;
3081 
3082 	if (!core->ops->get_duty_cycle)
3083 		return clk_core_update_duty_cycle_parent_nolock(core);
3084 
3085 	ret = core->ops->get_duty_cycle(core->hw, duty);
3086 	if (ret)
3087 		goto reset;
3088 
3089 	/* Don't trust the clock provider too much */
3090 	if (duty->den == 0 || duty->num > duty->den) {
3091 		ret = -EINVAL;
3092 		goto reset;
3093 	}
3094 
3095 	return 0;
3096 
3097 reset:
3098 	clk_core_reset_duty_cycle_nolock(core);
3099 	return ret;
3100 }
3101 
3102 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
3103 {
3104 	int ret = 0;
3105 
3106 	if (core->parent &&
3107 	    core->flags & CLK_DUTY_CYCLE_PARENT) {
3108 		ret = clk_core_update_duty_cycle_nolock(core->parent);
3109 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3110 	} else {
3111 		clk_core_reset_duty_cycle_nolock(core);
3112 	}
3113 
3114 	return ret;
3115 }
3116 
3117 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3118 						 struct clk_duty *duty);
3119 
3120 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
3121 					  struct clk_duty *duty)
3122 {
3123 	int ret;
3124 
3125 	lockdep_assert_held(&prepare_lock);
3126 
3127 	if (clk_core_rate_is_protected(core))
3128 		return -EBUSY;
3129 
3130 	trace_clk_set_duty_cycle(core, duty);
3131 
3132 	if (!core->ops->set_duty_cycle)
3133 		return clk_core_set_duty_cycle_parent_nolock(core, duty);
3134 
3135 	ret = core->ops->set_duty_cycle(core->hw, duty);
3136 	if (!ret)
3137 		memcpy(&core->duty, duty, sizeof(*duty));
3138 
3139 	trace_clk_set_duty_cycle_complete(core, duty);
3140 
3141 	return ret;
3142 }
3143 
3144 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3145 						 struct clk_duty *duty)
3146 {
3147 	int ret = 0;
3148 
3149 	if (core->parent &&
3150 	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
3151 		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
3152 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3153 	}
3154 
3155 	return ret;
3156 }
3157 
3158 /**
3159  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
3160  * @clk: clock signal source
3161  * @num: numerator of the duty cycle ratio to be applied
3162  * @den: denominator of the duty cycle ratio to be applied
3163  *
3164  * Apply the duty cycle ratio if the ratio is valid and the clock can
3165  * perform this operation.
3166  *
3167  * Returns (0) on success, a negative errno otherwise.
3168  */
3169 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
3170 {
3171 	int ret;
3172 	struct clk_duty duty;
3173 
3174 	if (!clk)
3175 		return 0;
3176 
3177 	/* sanity check the ratio */
3178 	if (den == 0 || num > den)
3179 		return -EINVAL;
3180 
3181 	duty.num = num;
3182 	duty.den = den;
3183 
3184 	clk_prepare_lock();
3185 
3186 	if (clk->exclusive_count)
3187 		clk_core_rate_unprotect(clk->core);
3188 
3189 	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
3190 
3191 	if (clk->exclusive_count)
3192 		clk_core_rate_protect(clk->core);
3193 
3194 	clk_prepare_unlock();
3195 
3196 	return ret;
3197 }
3198 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
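
/*
 * Illustrative consumer sketch (editor's addition): the ratio is passed as a
 * numerator/denominator pair, so a 1/3 duty cycle needs no decimal
 * approximation. The pwm_ref_clk handle is hypothetical.
 *
 *	ret = clk_set_duty_cycle(pwm_ref_clk, 1, 3);
 *	if (ret)
 *		dev_err(dev, "cannot set 1/3 duty cycle: %d\n", ret);
 */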
3199 
3200 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
3201 					  unsigned int scale)
3202 {
3203 	struct clk_duty *duty = &core->duty;
3204 	int ret;
3205 
3206 	clk_prepare_lock();
3207 
3208 	ret = clk_core_update_duty_cycle_nolock(core);
3209 	if (!ret)
3210 		ret = mult_frac(scale, duty->num, duty->den);
3211 
3212 	clk_prepare_unlock();
3213 
3214 	return ret;
3215 }
3216 
3217 /**
3218  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
3219  * @clk: clock signal source
3220  * @scale: scaling factor to be applied to represent the ratio as an integer
3221  *
3222  * Returns the duty cycle ratio of a clock node multiplied by the provided
3223  * scaling factor, or negative errno on error.
3224  */
3225 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
3226 {
3227 	if (!clk)
3228 		return 0;
3229 
3230 	return clk_core_get_scaled_duty_cycle(clk->core, scale);
3231 }
3232 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
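
/*
 * Illustrative consumer sketch (editor's addition): passing scale = 100
 * yields the duty cycle as a percentage, mirroring the mult_frac()
 * computation above. The pwm_ref_clk handle is hypothetical.
 *
 *	int pct = clk_get_scaled_duty_cycle(pwm_ref_clk, 100);
 *
 *	if (pct >= 0)
 *		dev_dbg(dev, "duty cycle: %d%%\n", pct);
 */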
3233 
3234 /**
3235  * clk_is_match - check if two clk's point to the same hardware clock
3236  * @p: clk compared against q
3237  * @q: clk compared against p
3238  *
3239  * Returns true if the two struct clk pointers both point to the same hardware
3240  * clock node. Put differently, returns true if struct clk *p and struct clk *q
3241  * share the same struct clk_core object.
3242  *
3243  * Returns false otherwise. Note that two NULL clks are treated as matching.
3244  */
3245 bool clk_is_match(const struct clk *p, const struct clk *q)
3246 {
3247 	/* trivial case: identical struct clk's or both NULL */
3248 	if (p == q)
3249 		return true;
3250 
3251 	/* true if clk->core pointers match. Avoid dereferencing garbage */
3252 	if (IS_ERR_OR_NULL(p) || IS_ERR_OR_NULL(q))
3253 		return false;
3254 
3255 	return p->core == q->core;
3256 }
3257 EXPORT_SYMBOL_GPL(clk_is_match);
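
/*
 * Illustrative consumer sketch (editor's addition): two handles obtained
 * independently may still point at the same hardware node. The connection
 * ids are hypothetical.
 *
 *	struct clk *a = devm_clk_get(dev, "bus");
 *	struct clk *b = devm_clk_get(dev, "core");
 *
 *	if (clk_is_match(a, b))
 *		dev_dbg(dev, "bus and core share one clock\n");
 */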
3258 
3259 /***        debugfs support        ***/
3260 
3261 #ifdef CONFIG_DEBUG_FS
3262 #include <linux/debugfs.h>
3263 
3264 static struct dentry *rootdir;
3265 static int inited = 0;
3266 static DEFINE_MUTEX(clk_debug_lock);
3267 static HLIST_HEAD(clk_debug_list);
3268 
3269 static struct hlist_head *orphan_list[] = {
3270 	&clk_orphan_list,
3271 	NULL,
3272 };
3273 
3274 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
3275 				 int level)
3276 {
3277 	int phase;
3278 	struct clk *clk_user;
3279 	int multi_node = 0;
3280 
3281 	seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
3282 		   level * 3 + 1, "",
3283 		   35 - level * 3, c->name,
3284 		   c->enable_count, c->prepare_count, c->protect_count,
3285 		   clk_core_get_rate_recalc(c),
3286 		   clk_core_get_accuracy_recalc(c));
3287 
3288 	phase = clk_core_get_phase(c);
3289 	if (phase >= 0)
3290 		seq_printf(s, "%-5d", phase);
3291 	else
3292 		seq_puts(s, "-----");
3293 
3294 	seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
3295 
3296 	if (c->ops->is_enabled)
3297 		seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
3298 	else if (!c->ops->enable)
3299 		seq_printf(s, " %5c ", 'Y');
3300 	else
3301 		seq_printf(s, " %5c ", '?');
3302 
3303 	hlist_for_each_entry(clk_user, &c->clks, clks_node) {
3304 		seq_printf(s, "%*s%-*s  %-25s\n",
3305 			   level * 3 + 2 + 105 * multi_node, "",
3306 			   30,
3307 			   clk_user->dev_id ? clk_user->dev_id : "deviceless",
3308 			   clk_user->con_id ? clk_user->con_id : "no_connection_id");
3309 
3310 		multi_node = 1;
3311 	}
3312 
3313 }
3314 
3315 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
3316 				     int level)
3317 {
3318 	struct clk_core *child;
3319 
3320 	clk_summary_show_one(s, c, level);
3321 
3322 	hlist_for_each_entry(child, &c->children, child_node)
3323 		clk_summary_show_subtree(s, child, level + 1);
3324 }
3325 
3326 static int clk_summary_show(struct seq_file *s, void *data)
3327 {
3328 	struct clk_core *c;
3329 	struct hlist_head **lists = s->private;
3330 	int ret;
3331 
3332 	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                            connection\n");
3333 	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
3334 	seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
3335 
3336 	ret = clk_pm_runtime_get_all();
3337 	if (ret)
3338 		return ret;
3339 
3340 	clk_prepare_lock();
3341 
3342 	for (; *lists; lists++)
3343 		hlist_for_each_entry(c, *lists, child_node)
3344 			clk_summary_show_subtree(s, c, 0);
3345 
3346 	clk_prepare_unlock();
3347 	clk_pm_runtime_put_all();
3348 
3349 	return 0;
3350 }
3351 DEFINE_SHOW_ATTRIBUTE(clk_summary);
3352 
3353 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3354 {
3355 	int phase;
3356 	unsigned long min_rate, max_rate;
3357 
3358 	clk_core_get_boundaries(c, &min_rate, &max_rate);
3359 
3360 	/* This should be JSON format, i.e. elements separated with a comma */
3361 	seq_printf(s, "\"%s\": { ", c->name);
3362 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3363 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3364 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3365 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3366 	seq_printf(s, "\"min_rate\": %lu,", min_rate);
3367 	seq_printf(s, "\"max_rate\": %lu,", max_rate);
3368 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3369 	phase = clk_core_get_phase(c);
3370 	if (phase >= 0)
3371 		seq_printf(s, "\"phase\": %d,", phase);
3372 	seq_printf(s, "\"duty_cycle\": %u",
3373 		   clk_core_get_scaled_duty_cycle(c, 100000));
3374 }
3375 
3376 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3377 {
3378 	struct clk_core *child;
3379 
3380 	clk_dump_one(s, c, level);
3381 
3382 	hlist_for_each_entry(child, &c->children, child_node) {
3383 		seq_putc(s, ',');
3384 		clk_dump_subtree(s, child, level + 1);
3385 	}
3386 
3387 	seq_putc(s, '}');
3388 }
3389 
3390 static int clk_dump_show(struct seq_file *s, void *data)
3391 {
3392 	struct clk_core *c;
3393 	bool first_node = true;
3394 	struct hlist_head **lists = s->private;
3395 	int ret;
3396 
3397 	ret = clk_pm_runtime_get_all();
3398 	if (ret)
3399 		return ret;
3400 
3401 	seq_putc(s, '{');
3402 
3403 	clk_prepare_lock();
3404 
3405 	for (; *lists; lists++) {
3406 		hlist_for_each_entry(c, *lists, child_node) {
3407 			if (!first_node)
3408 				seq_putc(s, ',');
3409 			first_node = false;
3410 			clk_dump_subtree(s, c, 0);
3411 		}
3412 	}
3413 
3414 	clk_prepare_unlock();
3415 	clk_pm_runtime_put_all();
3416 
3417 	seq_puts(s, "}\n");
3418 	return 0;
3419 }
3420 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3421 
3422 #undef CLOCK_ALLOW_WRITE_DEBUGFS
3423 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3424 /*
3425  * This can be dangerous, therefore don't provide any real compile time
3426  * configuration option for this feature.
3427  * People who want to use this will need to modify the source code directly.
3428  */
3429 static int clk_rate_set(void *data, u64 val)
3430 {
3431 	struct clk_core *core = data;
3432 	int ret;
3433 
3434 	clk_prepare_lock();
3435 	ret = clk_core_set_rate_nolock(core, val);
3436 	clk_prepare_unlock();
3437 
3438 	return ret;
3439 }
3440 
3441 #define clk_rate_mode	0644
3442 
3443 static int clk_phase_set(void *data, u64 val)
3444 {
3445 	struct clk_core *core = data;
3446 	int degrees = do_div(val, 360);
3447 	int ret;
3448 
3449 	clk_prepare_lock();
3450 	ret = clk_core_set_phase_nolock(core, degrees);
3451 	clk_prepare_unlock();
3452 
3453 	return ret;
3454 }
3455 
3456 #define clk_phase_mode	0644
3457 
3458 static int clk_prepare_enable_set(void *data, u64 val)
3459 {
3460 	struct clk_core *core = data;
3461 	int ret = 0;
3462 
3463 	if (val)
3464 		ret = clk_prepare_enable(core->hw->clk);
3465 	else
3466 		clk_disable_unprepare(core->hw->clk);
3467 
3468 	return ret;
3469 }
3470 
3471 static int clk_prepare_enable_get(void *data, u64 *val)
3472 {
3473 	struct clk_core *core = data;
3474 
3475 	*val = core->enable_count && core->prepare_count;
3476 	return 0;
3477 }
3478 
3479 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3480 			 clk_prepare_enable_set, "%llu\n");
3481 
3482 #else
3483 #define clk_rate_set	NULL
3484 #define clk_rate_mode	0444
3485 
3486 #define clk_phase_set	NULL
3487 #define clk_phase_mode	0444
3488 #endif
3489 
3490 static int clk_rate_get(void *data, u64 *val)
3491 {
3492 	struct clk_core *core = data;
3493 
3494 	clk_prepare_lock();
3495 	*val = clk_core_get_rate_recalc(core);
3496 	clk_prepare_unlock();
3497 
3498 	return 0;
3499 }
3500 
3501 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3502 
3503 static int clk_phase_get(void *data, u64 *val)
3504 {
3505 	struct clk_core *core = data;
3506 
3507 	*val = core->phase;
3508 	return 0;
3509 }
3510 
3511 DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n");
3512 
3513 static const struct {
3514 	unsigned long flag;
3515 	const char *name;
3516 } clk_flags[] = {
3517 #define ENTRY(f) { f, #f }
3518 	ENTRY(CLK_SET_RATE_GATE),
3519 	ENTRY(CLK_SET_PARENT_GATE),
3520 	ENTRY(CLK_SET_RATE_PARENT),
3521 	ENTRY(CLK_IGNORE_UNUSED),
3522 	ENTRY(CLK_GET_RATE_NOCACHE),
3523 	ENTRY(CLK_SET_RATE_NO_REPARENT),
3524 	ENTRY(CLK_GET_ACCURACY_NOCACHE),
3525 	ENTRY(CLK_RECALC_NEW_RATES),
3526 	ENTRY(CLK_SET_RATE_UNGATE),
3527 	ENTRY(CLK_IS_CRITICAL),
3528 	ENTRY(CLK_OPS_PARENT_ENABLE),
3529 	ENTRY(CLK_DUTY_CYCLE_PARENT),
3530 #undef ENTRY
3531 };
3532 
3533 static int clk_flags_show(struct seq_file *s, void *data)
3534 {
3535 	struct clk_core *core = s->private;
3536 	unsigned long flags = core->flags;
3537 	unsigned int i;
3538 
3539 	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3540 		if (flags & clk_flags[i].flag) {
3541 			seq_printf(s, "%s\n", clk_flags[i].name);
3542 			flags &= ~clk_flags[i].flag;
3543 		}
3544 	}
3545 	if (flags) {
3546 		/* Unknown flags */
3547 		seq_printf(s, "0x%lx\n", flags);
3548 	}
3549 
3550 	return 0;
3551 }
3552 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3553 
3554 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3555 				 unsigned int i, char terminator)
3556 {
3557 	struct clk_core *parent;
3558 	const char *name = NULL;
3559 
3560 	/*
3561 	 * Go through the following options to fetch a parent's name.
3562 	 *
3563 	 * 1. Fetch the registered parent clock and use its name
3564 	 * 2. Use the global (fallback) name if specified
3565 	 * 3. Use the local fw_name if provided
3566 	 * 4. Fetch parent clock's clock-output-name if DT index was set
3567 	 *
3568 	 * This may still fail in some cases, such as when the parent is
3569 	 * specified directly via a struct clk_hw pointer, but it isn't
3570 	 * registered (yet).
3571 	 */
3572 	parent = clk_core_get_parent_by_index(core, i);
3573 	if (parent) {
3574 		seq_puts(s, parent->name);
3575 	} else if (core->parents[i].name) {
3576 		seq_puts(s, core->parents[i].name);
3577 	} else if (core->parents[i].fw_name) {
3578 		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3579 	} else {
3580 		if (core->parents[i].index >= 0)
3581 			name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
3582 		if (!name)
3583 			name = "(missing)";
3584 
3585 		seq_puts(s, name);
3586 	}
3587 
3588 	seq_putc(s, terminator);
3589 }
3590 
3591 static int possible_parents_show(struct seq_file *s, void *data)
3592 {
3593 	struct clk_core *core = s->private;
3594 	int i;
3595 
3596 	for (i = 0; i < core->num_parents - 1; i++)
3597 		possible_parent_show(s, core, i, ' ');
3598 
3599 	possible_parent_show(s, core, i, '\n');
3600 
3601 	return 0;
3602 }
3603 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3604 
3605 static int current_parent_show(struct seq_file *s, void *data)
3606 {
3607 	struct clk_core *core = s->private;
3608 
3609 	if (core->parent)
3610 		seq_printf(s, "%s\n", core->parent->name);
3611 
3612 	return 0;
3613 }
3614 DEFINE_SHOW_ATTRIBUTE(current_parent);
3615 
3616 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3617 static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
3618 				    size_t count, loff_t *ppos)
3619 {
3620 	struct seq_file *s = file->private_data;
3621 	struct clk_core *core = s->private;
3622 	struct clk_core *parent;
3623 	u8 idx;
3624 	int err;
3625 
3626 	err = kstrtou8_from_user(ubuf, count, 0, &idx);
3627 	if (err < 0)
3628 		return err;
3629 
3630 	parent = clk_core_get_parent_by_index(core, idx);
3631 	if (!parent)
3632 		return -ENOENT;
3633 
3634 	clk_prepare_lock();
3635 	err = clk_core_set_parent_nolock(core, parent);
3636 	clk_prepare_unlock();
3637 	if (err)
3638 		return err;
3639 
3640 	return count;
3641 }
3642 
3643 static const struct file_operations current_parent_rw_fops = {
3644 	.open		= current_parent_open,
3645 	.write		= current_parent_write,
3646 	.read		= seq_read,
3647 	.llseek		= seq_lseek,
3648 	.release	= single_release,
3649 };
3650 #endif
3651 
3652 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3653 {
3654 	struct clk_core *core = s->private;
3655 	struct clk_duty *duty = &core->duty;
3656 
3657 	seq_printf(s, "%u/%u\n", duty->num, duty->den);
3658 
3659 	return 0;
3660 }
3661 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3662 
3663 static int clk_min_rate_show(struct seq_file *s, void *data)
3664 {
3665 	struct clk_core *core = s->private;
3666 	unsigned long min_rate, max_rate;
3667 
3668 	clk_prepare_lock();
3669 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3670 	clk_prepare_unlock();
3671 	seq_printf(s, "%lu\n", min_rate);
3672 
3673 	return 0;
3674 }
3675 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3676 
3677 static int clk_max_rate_show(struct seq_file *s, void *data)
3678 {
3679 	struct clk_core *core = s->private;
3680 	unsigned long min_rate, max_rate;
3681 
3682 	clk_prepare_lock();
3683 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3684 	clk_prepare_unlock();
3685 	seq_printf(s, "%lu\n", max_rate);
3686 
3687 	return 0;
3688 }
3689 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3690 
3691 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3692 {
3693 	struct dentry *root;
3694 
3695 	if (!core || !pdentry)
3696 		return;
3697 
3698 	root = debugfs_create_dir(core->name, pdentry);
3699 	core->dentry = root;
3700 
3701 	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3702 			    &clk_rate_fops);
3703 	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3704 	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3705 	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3706 	debugfs_create_file("clk_phase", clk_phase_mode, root, core,
3707 			    &clk_phase_fops);
3708 	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3709 	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3710 	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3711 	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3712 	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3713 	debugfs_create_file("clk_duty_cycle", 0444, root, core,
3714 			    &clk_duty_cycle_fops);
3715 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3716 	debugfs_create_file("clk_prepare_enable", 0644, root, core,
3717 			    &clk_prepare_enable_fops);
3718 
3719 	if (core->num_parents > 1)
3720 		debugfs_create_file("clk_parent", 0644, root, core,
3721 				    &current_parent_rw_fops);
3722 	else
3723 #endif
3724 	if (core->num_parents > 0)
3725 		debugfs_create_file("clk_parent", 0444, root, core,
3726 				    &current_parent_fops);
3727 
3728 	if (core->num_parents > 1)
3729 		debugfs_create_file("clk_possible_parents", 0444, root, core,
3730 				    &possible_parents_fops);
3731 
3732 	if (core->ops->debug_init)
3733 		core->ops->debug_init(core->hw, core->dentry);
3734 }
3735 
3736 /**
3737  * clk_debug_register - add a clk node to the debugfs clk directory
3738  * @core: the clk being added to the debugfs clk directory
3739  *
3740  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3741  * initialized.  Otherwise it bails out early since the debugfs clk directory
3742  * will be created lazily by clk_debug_init as part of a late_initcall.
3743  */
3744 static void clk_debug_register(struct clk_core *core)
3745 {
3746 	mutex_lock(&clk_debug_lock);
3747 	hlist_add_head(&core->debug_node, &clk_debug_list);
3748 	if (inited)
3749 		clk_debug_create_one(core, rootdir);
3750 	mutex_unlock(&clk_debug_lock);
3751 }
3752 
3753 /**
3754  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3755  * @core: the clk being removed from the debugfs clk directory
3756  *
3757  * Dynamically removes a clk and all its child nodes from the
3758  * debugfs clk directory if clk->dentry points to debugfs created by
3759  * clk_debug_register in __clk_core_init.
3760  */
3761 static void clk_debug_unregister(struct clk_core *core)
3762 {
3763 	mutex_lock(&clk_debug_lock);
3764 	hlist_del_init(&core->debug_node);
3765 	debugfs_remove_recursive(core->dentry);
3766 	core->dentry = NULL;
3767 	mutex_unlock(&clk_debug_lock);
3768 }
3769 
3770 /**
3771  * clk_debug_init - lazily populate the debugfs clk directory
3772  *
3773  * clks are often initialized very early during boot before memory can be
3774  * dynamically allocated and well before debugfs is setup. This function
3775  * populates the debugfs clk directory once at boot-time when we know that
3776  * debugfs is setup. It should only be called once at boot-time, all other clks
3777  * added dynamically will be done so with clk_debug_register.
3778  */
3779 static int __init clk_debug_init(void)
3780 {
3781 	struct clk_core *core;
3782 
3783 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3784 	pr_warn("\n");
3785 	pr_warn("********************************************************************\n");
3786 	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3787 	pr_warn("**                                                                **\n");
3788 	pr_warn("**  WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3789 	pr_warn("**                                                                **\n");
3790 	pr_warn("** This means that this kernel is built to expose clk operations  **\n");
3791 	pr_warn("** such as parent or rate setting, enabling, disabling, etc.      **\n");
3792 	pr_warn("** to userspace, which may compromise security on your system.    **\n");
3793 	pr_warn("**                                                                **\n");
3794 	pr_warn("** If you see this message and you are not debugging the          **\n");
3795 	pr_warn("** kernel, report this immediately to your vendor!                **\n");
3796 	pr_warn("**                                                                **\n");
3797 	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3798 	pr_warn("********************************************************************\n");
3799 #endif
3800 
3801 	rootdir = debugfs_create_dir("clk", NULL);
3802 
3803 	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3804 			    &clk_summary_fops);
3805 	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3806 			    &clk_dump_fops);
3807 	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3808 			    &clk_summary_fops);
3809 	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3810 			    &clk_dump_fops);
3811 
3812 	mutex_lock(&clk_debug_lock);
3813 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
3814 		clk_debug_create_one(core, rootdir);
3815 
3816 	inited = 1;
3817 	mutex_unlock(&clk_debug_lock);
3818 
3819 	return 0;
3820 }
3821 late_initcall(clk_debug_init);
3822 #else
3823 static inline void clk_debug_register(struct clk_core *core) { }
3824 static inline void clk_debug_unregister(struct clk_core *core)
3825 {
3826 }
3827 #endif
3828 
3829 static void clk_core_reparent_orphans_nolock(void)
3830 {
3831 	struct clk_core *orphan;
3832 	struct hlist_node *tmp2;
3833 
3834 	/*
	 * Walk the list of orphan clocks and reparent any that have newly
	 * found a parent.
3837 	 */
3838 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3839 		struct clk_core *parent = __clk_init_parent(orphan);
3840 
3841 		/*
3842 		 * We need to use __clk_set_parent_before() and _after() to
3843 		 * properly migrate any prepare/enable count of the orphan
3844 		 * clock. This is important for CLK_IS_CRITICAL clocks, which
3845 		 * are enabled during init but might not have a parent yet.
3846 		 */
3847 		if (parent) {
3848 			/* update the clk tree topology */
3849 			__clk_set_parent_before(orphan, parent);
3850 			__clk_set_parent_after(orphan, parent, NULL);
3851 			__clk_recalc_accuracies(orphan);
3852 			__clk_recalc_rates(orphan, true, 0);
3853 
3854 			/*
3855 			 * __clk_init_parent() will set the initial req_rate to
3856 			 * 0 if the clock doesn't have clk_ops::recalc_rate and
3857 			 * is an orphan when it's registered.
3858 			 *
3859 			 * 'req_rate' is used by clk_set_rate_range() and
3860 			 * clk_put() to trigger a clk_set_rate() call whenever
3861 			 * the boundaries are modified. Let's make sure
3862 			 * 'req_rate' is set to something non-zero so that
3863 			 * clk_set_rate_range() doesn't drop the frequency.
3864 			 */
3865 			orphan->req_rate = orphan->rate;
3866 		}
3867 	}
3868 }
3869 
3870 /**
3871  * __clk_core_init - initialize the data structures in a struct clk_core
3872  * @core:	clk_core being initialized
3873  *
3874  * Initializes the lists in struct clk_core, queries the hardware for the
3875  * parent and rate and sets them both.
3876  */
3877 static int __clk_core_init(struct clk_core *core)
3878 {
3879 	int ret;
3880 	struct clk_core *parent;
3881 	unsigned long rate;
3882 	int phase;
3883 
3884 	clk_prepare_lock();
3885 
3886 	/*
3887 	 * Set hw->core after grabbing the prepare_lock to synchronize with
3888 	 * callers of clk_core_fill_parent_index() where we treat hw->core
3889 	 * being NULL as the clk not being registered yet. This is crucial so
3890 	 * that clks aren't parented until their parent is fully registered.
3891 	 */
3892 	core->hw->core = core;
3893 
3894 	ret = clk_pm_runtime_get(core);
3895 	if (ret)
3896 		goto unlock;
3897 
3898 	/* check to see if a clock with this name is already registered */
3899 	if (clk_core_lookup(core->name)) {
3900 		pr_debug("%s: clk %s already initialized\n",
3901 				__func__, core->name);
3902 		ret = -EEXIST;
3903 		goto out;
3904 	}
3905 
3906 	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3907 	if (core->ops->set_rate && !core->ops->determine_rate &&
3908 	      core->ops->recalc_rate) {
3909 		pr_err("%s: %s must implement .determine_rate in addition to .recalc_rate\n",
3910 		       __func__, core->name);
3911 		ret = -EINVAL;
3912 		goto out;
3913 	}
3914 
3915 	if (core->ops->set_parent && !core->ops->get_parent) {
3916 		pr_err("%s: %s must implement .get_parent & .set_parent\n",
3917 		       __func__, core->name);
3918 		ret = -EINVAL;
3919 		goto out;
3920 	}
3921 
3922 	if (core->ops->set_parent && !core->ops->determine_rate) {
3923 		pr_err("%s: %s must implement .set_parent & .determine_rate\n",
3924 			__func__, core->name);
3925 		ret = -EINVAL;
3926 		goto out;
3927 	}
3928 
3929 	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3931 		       __func__, core->name);
3932 		ret = -EINVAL;
3933 		goto out;
3934 	}
3935 
3936 	if (core->ops->set_rate_and_parent &&
3937 			!(core->ops->set_parent && core->ops->set_rate)) {
3938 		pr_err("%s: %s must implement .set_parent & .set_rate\n",
3939 				__func__, core->name);
3940 		ret = -EINVAL;
3941 		goto out;
3942 	}
3943 
3944 	/*
3945 	 * optional platform-specific magic
3946 	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic so
	 * that CCF gets an accurate view of the clock for any other callbacks.
	 * It may also be used when a driver needs to perform dynamic
	 * allocations. Such allocations must be freed in the terminate()
	 * callback.
	 * This callback shall not be used to initialize the state of
	 * parameters such as rate, parent, etc.
	 *
	 * If it exists, this callback should be called before any other
	 * callback of the clock.
3957 	 */
3958 	if (core->ops->init) {
3959 		ret = core->ops->init(core->hw);
3960 		if (ret)
3961 			goto out;
3962 	}
3963 
3964 	parent = core->parent = __clk_init_parent(core);
3965 
3966 	/*
3967 	 * Populate core->parent if parent has already been clk_core_init'd. If
3968 	 * parent has not yet been clk_core_init'd then place clk in the orphan
3969 	 * list.  If clk doesn't have any parents then place it in the root
3970 	 * clk list.
3971 	 *
3972 	 * Every time a new clk is clk_init'd then we walk the list of orphan
3973 	 * clocks and re-parent any that are children of the clock currently
3974 	 * being clk_init'd.
3975 	 */
3976 	if (parent) {
3977 		hlist_add_head(&core->child_node, &parent->children);
3978 		core->orphan = parent->orphan;
3979 	} else if (!core->num_parents) {
3980 		hlist_add_head(&core->child_node, &clk_root_list);
3981 		core->orphan = false;
3982 	} else {
3983 		hlist_add_head(&core->child_node, &clk_orphan_list);
3984 		core->orphan = true;
3985 	}
3986 	hash_add(clk_hashtable, &core->hashtable_node,
3987 		 full_name_hash(NULL, core->name, strlen(core->name)));
3988 
3989 	/*
3990 	 * Set clk's accuracy.  The preferred method is to use
3991 	 * .recalc_accuracy. For simple clocks and lazy developers the default
3992 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
3993 	 * parent (or is orphaned) then accuracy is set to zero (perfect
3994 	 * clock).
3995 	 */
3996 	if (core->ops->recalc_accuracy)
3997 		core->accuracy = core->ops->recalc_accuracy(core->hw,
3998 					clk_core_get_accuracy_no_lock(parent));
3999 	else if (parent)
4000 		core->accuracy = parent->accuracy;
4001 	else
4002 		core->accuracy = 0;
4003 
4004 	/*
	 * Set clk's phase, which clk_core_get_phase() caches internally.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
4008 	 */
4009 	phase = clk_core_get_phase(core);
4010 	if (phase < 0) {
4011 		ret = phase;
4012 		pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
4013 			core->name);
4014 		goto out;
4015 	}
4016 
4017 	/*
4018 	 * Set clk's duty cycle.
4019 	 */
4020 	clk_core_update_duty_cycle_nolock(core);
4021 
4022 	/*
4023 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
4024 	 * simple clocks and lazy developers the default fallback is to use the
4025 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
4026 	 * then rate is set to zero.
4027 	 */
4028 	if (core->ops->recalc_rate)
4029 		rate = core->ops->recalc_rate(core->hw,
4030 				clk_core_get_rate_nolock(parent));
4031 	else if (parent)
4032 		rate = parent->rate;
4033 	else
4034 		rate = 0;
4035 	core->rate = core->req_rate = rate;
4036 
4037 	/*
4038 	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
4039 	 * don't get accidentally disabled when walking the orphan tree and
4040 	 * reparenting clocks
4041 	 */
4042 	if (core->flags & CLK_IS_CRITICAL) {
4043 		ret = clk_core_prepare(core);
4044 		if (ret) {
4045 			pr_warn("%s: critical clk '%s' failed to prepare\n",
4046 			       __func__, core->name);
4047 			goto out;
4048 		}
4049 
4050 		ret = clk_core_enable_lock(core);
4051 		if (ret) {
4052 			pr_warn("%s: critical clk '%s' failed to enable\n",
4053 			       __func__, core->name);
4054 			clk_core_unprepare(core);
4055 			goto out;
4056 		}
4057 	}
4058 
4059 	clk_core_reparent_orphans_nolock();
4060 out:
4061 	clk_pm_runtime_put(core);
4062 unlock:
4063 	if (ret) {
4064 		hash_del(&core->hashtable_node);
4065 		hlist_del_init(&core->child_node);
4066 		core->hw->core = NULL;
4067 	}
4068 
4069 	clk_prepare_unlock();
4070 
4071 	if (!ret)
4072 		clk_debug_register(core);
4073 
4074 	return ret;
4075 }
4076 
4077 /**
4078  * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
4079  * @core: clk to add consumer to
4080  * @clk: consumer to link to a clk
4081  */
4082 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
4083 {
4084 	clk_prepare_lock();
4085 	hlist_add_head(&clk->clks_node, &core->clks);
4086 	clk_prepare_unlock();
4087 }
4088 
4089 /**
4090  * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
4091  * @clk: consumer to unlink
4092  */
4093 static void clk_core_unlink_consumer(struct clk *clk)
4094 {
4095 	lockdep_assert_held(&prepare_lock);
4096 	hlist_del(&clk->clks_node);
4097 }
4098 
4099 /**
 * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
4101  * @core: clk to allocate a consumer for
4102  * @dev_id: string describing device name
4103  * @con_id: connection ID string on device
4104  *
4105  * Returns: clk consumer left unlinked from the consumer list
4106  */
4107 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
4108 			     const char *con_id)
4109 {
4110 	struct clk *clk;
4111 
4112 	clk = kzalloc_obj(*clk);
4113 	if (!clk)
4114 		return ERR_PTR(-ENOMEM);
4115 
4116 	clk->core = core;
4117 	clk->dev_id = dev_id;
4118 	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
4119 	clk->max_rate = ULONG_MAX;
4120 
4121 	return clk;
4122 }
4123 
4124 /**
4125  * free_clk - Free a clk consumer
4126  * @clk: clk consumer to free
4127  *
4128  * Note, this assumes the clk has been unlinked from the clk_core consumer
4129  * list.
4130  */
4131 static void free_clk(struct clk *clk)
4132 {
4133 	kfree_const(clk->con_id);
4134 	kfree(clk);
4135 }
4136 
4137 /**
 * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
 * a clk_hw
4140  * @dev: clk consumer device
4141  * @hw: clk_hw associated with the clk being consumed
4142  * @dev_id: string describing device name
4143  * @con_id: connection ID string on device
4144  *
4145  * This is the main function used to create a clk pointer for use by clk
4146  * consumers. It connects a consumer to the clk_core and clk_hw structures
4147  * used by the framework and clk provider respectively.
4148  */
4149 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
4150 			      const char *dev_id, const char *con_id)
4151 {
4152 	struct clk *clk;
4153 	struct clk_core *core;
4154 
4155 	/* This is to allow this function to be chained to others */
4156 	if (IS_ERR_OR_NULL(hw))
4157 		return ERR_CAST(hw);
4158 
4159 	core = hw->core;
4160 	clk = alloc_clk(core, dev_id, con_id);
4161 	if (IS_ERR(clk))
4162 		return clk;
4163 	clk->dev = dev;
4164 
4165 	if (!try_module_get(core->owner)) {
4166 		free_clk(clk);
4167 		return ERR_PTR(-ENOENT);
4168 	}
4169 
4170 	kref_get(&core->ref);
4171 	clk_core_link_consumer(core, clk);
4172 
4173 	return clk;
4174 }
4175 
4176 /**
 * clk_hw_get_clk - get clk consumer given a clk_hw
4178  * @hw: clk_hw associated with the clk being consumed
4179  * @con_id: connection ID string on device
4180  *
 * Returns: new clk consumer
 *
 * This is the function to be used by providers which need to get a consumer
 * clk and act on the clock element.
 * Calls to this function must be balanced with calls to clk_put().
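 *
 * Example (a minimal sketch; the "foo" provider state and the "foo-div"
 * con_id are hypothetical):
 *
 *	struct clk *clk = clk_hw_get_clk(&foo->div_hw, "foo-div");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	...
 *	clk_put(clk);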
4185  */
4186 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
4187 {
4188 	struct device *dev = hw->core->dev;
4189 	const char *name = dev ? dev_name(dev) : NULL;
4190 
4191 	return clk_hw_create_clk(dev, hw, name, con_id);
4192 }
4193 EXPORT_SYMBOL(clk_hw_get_clk);
4194 
4195 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
4196 {
4197 	const char *dst;
4198 
4199 	if (!src) {
4200 		if (must_exist)
4201 			return -EINVAL;
4202 		return 0;
4203 	}
4204 
4205 	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
4206 	if (!dst)
4207 		return -ENOMEM;
4208 
4209 	return 0;
4210 }
4211 
4212 static int clk_core_populate_parent_map(struct clk_core *core,
4213 					const struct clk_init_data *init)
4214 {
4215 	u8 num_parents = init->num_parents;
4216 	const char * const *parent_names = init->parent_names;
4217 	const struct clk_hw **parent_hws = init->parent_hws;
4218 	const struct clk_parent_data *parent_data = init->parent_data;
4219 	int i, ret = 0;
4220 	struct clk_parent_map *parents, *parent;
4221 
4222 	if (!num_parents)
4223 		return 0;
4224 
4225 	/*
4226 	 * Avoid unnecessary string look-ups of clk_core's possible parents by
4227 	 * having a cache of names/clk_hw pointers to clk_core pointers.
4228 	 */
4229 	parents = kzalloc_objs(*parents, num_parents);
4230 	core->parents = parents;
4231 	if (!parents)
4232 		return -ENOMEM;
4233 
4234 	/* Copy everything over because it might be __initdata */
4235 	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
4236 		parent->index = -1;
4237 		if (parent_names) {
4238 			/* throw a WARN if any entries are NULL */
4239 			WARN(!parent_names[i],
4240 				"%s: invalid NULL in %s's .parent_names\n",
4241 				__func__, core->name);
4242 			ret = clk_cpy_name(&parent->name, parent_names[i],
4243 					   true);
4244 		} else if (parent_data) {
4245 			parent->hw = parent_data[i].hw;
4246 			parent->index = parent_data[i].index;
4247 			ret = clk_cpy_name(&parent->fw_name,
4248 					   parent_data[i].fw_name, false);
4249 			if (!ret)
4250 				ret = clk_cpy_name(&parent->name,
4251 						   parent_data[i].name,
4252 						   false);
4253 		} else if (parent_hws) {
4254 			parent->hw = parent_hws[i];
4255 		} else {
4256 			ret = -EINVAL;
4257 			WARN(1, "Must specify parents if num_parents > 0\n");
4258 		}
4259 
4260 		if (ret) {
4261 			do {
4262 				kfree_const(parents[i].name);
4263 				kfree_const(parents[i].fw_name);
4264 			} while (--i >= 0);
4265 			kfree(parents);
4266 
4267 			return ret;
4268 		}
4269 	}
4270 
4271 	return 0;
4272 }
4273 
4274 static void clk_core_free_parent_map(struct clk_core *core)
4275 {
4276 	int i = core->num_parents;
4277 
4278 	if (!core->num_parents)
4279 		return;
4280 
4281 	while (--i >= 0) {
4282 		kfree_const(core->parents[i].name);
4283 		kfree_const(core->parents[i].fw_name);
4284 	}
4285 
4286 	kfree(core->parents);
4287 }
4288 
4289 /* Free memory allocated for a struct clk_core */
4290 static void __clk_release(struct kref *ref)
4291 {
4292 	struct clk_core *core = container_of(ref, struct clk_core, ref);
4293 
4294 	if (core->rpm_enabled) {
4295 		mutex_lock(&clk_rpm_list_lock);
4296 		hlist_del(&core->rpm_node);
4297 		mutex_unlock(&clk_rpm_list_lock);
4298 	}
4299 
4300 	clk_core_free_parent_map(core);
4301 	kfree_const(core->name);
4302 	kfree(core);
4303 }
4304 
4305 static struct clk *
4306 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
4307 {
4308 	int ret;
4309 	struct clk_core *core;
4310 	const struct clk_init_data *init = hw->init;
4311 
4312 	/*
	 * The init data is not supposed to be used outside of the registration path.
4314 	 * Set it to NULL so that provider drivers can't use it either and so that
4315 	 * we catch use of hw->init early on in the core.
4316 	 */
4317 	hw->init = NULL;
4318 
4319 	core = kzalloc_obj(*core);
4320 	if (!core) {
4321 		ret = -ENOMEM;
4322 		goto fail_out;
4323 	}
4324 
4325 	kref_init(&core->ref);
4326 
4327 	core->name = kstrdup_const(init->name, GFP_KERNEL);
4328 	if (!core->name) {
4329 		ret = -ENOMEM;
4330 		goto fail_name;
4331 	}
4332 
4333 	if (WARN_ON(!init->ops)) {
4334 		ret = -EINVAL;
4335 		goto fail_ops;
4336 	}
4337 	core->ops = init->ops;
4338 
4339 	core->dev = dev;
4340 	clk_pm_runtime_init(core);
4341 	core->of_node = np;
4342 	if (dev && dev->driver)
4343 		core->owner = dev->driver->owner;
4344 	core->hw = hw;
4345 	core->flags = init->flags;
4346 	core->num_parents = init->num_parents;
4347 	core->min_rate = 0;
4348 	core->max_rate = ULONG_MAX;
4349 
4350 	ret = clk_core_populate_parent_map(core, init);
4351 	if (ret)
4352 		goto fail_parents;
4353 
4354 	INIT_HLIST_HEAD(&core->clks);
4355 
4356 	/*
4357 	 * Don't call clk_hw_create_clk() here because that would pin the
4358 	 * provider module to itself and prevent it from ever being removed.
4359 	 */
4360 	hw->clk = alloc_clk(core, NULL, NULL);
4361 	if (IS_ERR(hw->clk)) {
4362 		ret = PTR_ERR(hw->clk);
4363 		goto fail_create_clk;
4364 	}
4365 
4366 	clk_core_link_consumer(core, hw->clk);
4367 
4368 	ret = __clk_core_init(core);
4369 	if (!ret)
4370 		return hw->clk;
4371 
4372 	clk_prepare_lock();
4373 	clk_core_unlink_consumer(hw->clk);
4374 	clk_prepare_unlock();
4375 
4376 	free_clk(hw->clk);
4377 	hw->clk = NULL;
4378 
4379 fail_create_clk:
4380 fail_parents:
4381 fail_ops:
4382 fail_name:
4383 	kref_put(&core->ref, __clk_release);
4384 fail_out:
4385 	if (dev) {
4386 		dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
4387 			      init->name, hw);
4388 	} else {
4389 		pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
4390 		       np, ERR_PTR(ret), init->name, hw);
4391 	}
4392 	return ERR_PTR(ret);
4393 }
4394 
4395 /**
4396  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4397  * @dev: Device to get device node of
4398  *
4399  * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if @dev doesn't have a device node, or NULL if neither
 * @dev nor @dev->parent has a device node.
4402  */
4403 static struct device_node *dev_or_parent_of_node(struct device *dev)
4404 {
4405 	struct device_node *np;
4406 
4407 	if (!dev)
4408 		return NULL;
4409 
4410 	np = dev_of_node(dev);
4411 	if (!np)
4412 		np = dev_of_node(dev->parent);
4413 
4414 	return np;
4415 }
4416 
4417 /**
4418  * clk_register - allocate a new clock, register it and return an opaque cookie
4419  * @dev: device that is registering this clock
4420  * @hw: link to hardware-specific clock data
4421  *
4422  * clk_register is the *deprecated* interface for populating the clock tree with
4423  * new clock nodes. Use clk_hw_register() instead.
4424  *
4425  * Returns: a pointer to the newly allocated struct clk which
4426  * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error, clk_register will return an
4428  * error code; drivers must test for an error code after calling clk_register.
4429  */
4430 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4431 {
4432 	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4433 }
4434 EXPORT_SYMBOL_GPL(clk_register);
4435 
4436 /**
4437  * clk_hw_register - register a clk_hw and return an error code
4438  * @dev: device that is registering this clock
4439  * @hw: link to hardware-specific clock data
4440  *
4441  * clk_hw_register is the primary interface for populating the clock tree with
4442  * new clock nodes. It returns an integer equal to zero indicating success or
4443  * less than zero indicating failure. Drivers must test for an error code after
4444  * calling clk_hw_register().
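 *
 * Example (a minimal sketch; the "foo" driver state, its clk_hw and its
 * struct clk_init_data are hypothetical):
 *
 *	foo->hw.init = &init_data;
 *	ret = clk_hw_register(dev, &foo->hw);
 *	if (ret)
 *		return ret;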
4445  */
4446 int clk_hw_register(struct device *dev, struct clk_hw *hw)
4447 {
4448 	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4449 			       hw));
4450 }
4451 EXPORT_SYMBOL_GPL(clk_hw_register);
4452 
/**
4454  * of_clk_hw_register - register a clk_hw and return an error code
4455  * @node: device_node of device that is registering this clock
4456  * @hw: link to hardware-specific clock data
4457  *
4458  * of_clk_hw_register() is the primary interface for populating the clock tree
4459  * with new clock nodes when a struct device is not available, but a struct
4460  * device_node is. It returns an integer equal to zero indicating success or
4461  * less than zero indicating failure. Drivers must test for an error code after
4462  * calling of_clk_hw_register().
4463  */
4464 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4465 {
4466 	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4467 }
4468 EXPORT_SYMBOL_GPL(of_clk_hw_register);
4469 
4470 /*
4471  * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
4474  */
4475 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
4476 {
4477 	return -ENXIO;
4478 }
4479 
4480 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
4481 {
4482 	WARN_ON_ONCE(1);
4483 }
4484 
4485 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
4486 					unsigned long parent_rate)
4487 {
4488 	return -ENXIO;
4489 }
4490 
4491 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
4492 {
4493 	return -ENXIO;
4494 }
4495 
4496 static int clk_nodrv_determine_rate(struct clk_hw *hw,
4497 				    struct clk_rate_request *req)
4498 {
4499 	return -ENXIO;
4500 }
4501 
4502 static const struct clk_ops clk_nodrv_ops = {
4503 	.enable		= clk_nodrv_prepare_enable,
4504 	.disable	= clk_nodrv_disable_unprepare,
4505 	.prepare	= clk_nodrv_prepare_enable,
4506 	.unprepare	= clk_nodrv_disable_unprepare,
4507 	.determine_rate	= clk_nodrv_determine_rate,
4508 	.set_rate	= clk_nodrv_set_rate,
4509 	.set_parent	= clk_nodrv_set_parent,
4510 };
4511 
4512 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4513 						const struct clk_core *target)
4514 {
4515 	int i;
4516 	struct clk_core *child;
4517 
4518 	for (i = 0; i < root->num_parents; i++)
4519 		if (root->parents[i].core == target)
4520 			root->parents[i].core = NULL;
4521 
4522 	hlist_for_each_entry(child, &root->children, child_node)
4523 		clk_core_evict_parent_cache_subtree(child, target);
4524 }
4525 
4526 /* Remove this clk from all parent caches */
4527 static void clk_core_evict_parent_cache(struct clk_core *core)
4528 {
4529 	const struct hlist_head **lists;
4530 	struct clk_core *root;
4531 
4532 	lockdep_assert_held(&prepare_lock);
4533 
4534 	for (lists = all_lists; *lists; lists++)
4535 		hlist_for_each_entry(root, *lists, child_node)
4536 			clk_core_evict_parent_cache_subtree(root, core);
4537 
4538 }
4539 
4540 /**
4541  * clk_unregister - unregister a currently registered clock
4542  * @clk: clock to unregister
4543  */
4544 void clk_unregister(struct clk *clk)
4545 {
4546 	unsigned long flags;
4547 	const struct clk_ops *ops;
4548 
4549 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4550 		return;
4551 
4552 	clk_debug_unregister(clk->core);
4553 
4554 	clk_prepare_lock();
4555 
4556 	ops = clk->core->ops;
4557 	if (ops == &clk_nodrv_ops) {
4558 		pr_err("%s: unregistered clock: %s\n", __func__,
4559 		       clk->core->name);
4560 		clk_prepare_unlock();
4561 		return;
4562 	}
4563 	/*
4564 	 * Assign empty clock ops for consumers that might still hold
4565 	 * a reference to this clock.
4566 	 */
4567 	flags = clk_enable_lock();
4568 	clk->core->ops = &clk_nodrv_ops;
4569 	clk_enable_unlock(flags);
4570 
4571 	if (ops->terminate)
4572 		ops->terminate(clk->core->hw);
4573 
4574 	if (!hlist_empty(&clk->core->children)) {
4575 		struct clk_core *child;
4576 		struct hlist_node *t;
4577 
4578 		/* Reparent all children to the orphan list. */
4579 		hlist_for_each_entry_safe(child, t, &clk->core->children,
4580 					  child_node)
4581 			clk_core_set_parent_nolock(child, NULL);
4582 	}
4583 
4584 	clk_core_evict_parent_cache(clk->core);
4585 
4586 	hash_del(&clk->core->hashtable_node);
4587 	hlist_del_init(&clk->core->child_node);
4588 
4589 	if (clk->core->prepare_count)
4590 		pr_warn("%s: unregistering prepared clock: %s\n",
4591 					__func__, clk->core->name);
4592 
4593 	if (clk->core->protect_count)
4594 		pr_warn("%s: unregistering protected clock: %s\n",
4595 					__func__, clk->core->name);
4596 	clk_prepare_unlock();
4597 
4598 	kref_put(&clk->core->ref, __clk_release);
4599 	free_clk(clk);
4600 }
4601 EXPORT_SYMBOL_GPL(clk_unregister);
4602 
4603 /**
4604  * clk_hw_unregister - unregister a currently registered clk_hw
4605  * @hw: hardware-specific clock data to unregister
4606  */
4607 void clk_hw_unregister(struct clk_hw *hw)
4608 {
4609 	clk_unregister(hw->clk);
4610 }
4611 EXPORT_SYMBOL_GPL(clk_hw_unregister);
4612 
4613 static void devm_clk_unregister_cb(struct device *dev, void *res)
4614 {
4615 	clk_unregister(*(struct clk **)res);
4616 }
4617 
4618 static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4619 {
4620 	clk_hw_unregister(*(struct clk_hw **)res);
4621 }
4622 
4623 /**
4624  * devm_clk_register - resource managed clk_register()
4625  * @dev: device that is registering this clock
4626  * @hw: link to hardware-specific clock data
4627  *
4628  * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4629  *
4630  * Clocks returned from this function are automatically clk_unregister()ed on
4631  * driver detach. See clk_register() for more information.
4632  */
4633 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4634 {
4635 	struct clk *clk;
4636 	struct clk **clkp;
4637 
4638 	clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4639 	if (!clkp)
4640 		return ERR_PTR(-ENOMEM);
4641 
4642 	clk = clk_register(dev, hw);
4643 	if (!IS_ERR(clk)) {
4644 		*clkp = clk;
4645 		devres_add(dev, clkp);
4646 	} else {
4647 		devres_free(clkp);
4648 	}
4649 
4650 	return clk;
4651 }
4652 EXPORT_SYMBOL_GPL(devm_clk_register);
4653 
4654 /**
4655  * devm_clk_hw_register - resource managed clk_hw_register()
4656  * @dev: device that is registering this clock
4657  * @hw: link to hardware-specific clock data
4658  *
4659  * Managed clk_hw_register(). Clocks registered by this function are
4660  * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4661  * for more information.
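 *
 * Example (a minimal sketch; the "foo" driver state and its clk_hw are
 * hypothetical):
 *
 *	ret = devm_clk_hw_register(dev, &foo->hw);
 *	if (ret)
 *		return ret;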
4662  */
4663 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4664 {
4665 	struct clk_hw **hwp;
4666 	int ret;
4667 
4668 	hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4669 	if (!hwp)
4670 		return -ENOMEM;
4671 
4672 	ret = clk_hw_register(dev, hw);
4673 	if (!ret) {
4674 		*hwp = hw;
4675 		devres_add(dev, hwp);
4676 	} else {
4677 		devres_free(hwp);
4678 	}
4679 
4680 	return ret;
4681 }
4682 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
4683 
4684 static void devm_clk_release(struct device *dev, void *res)
4685 {
4686 	clk_put(*(struct clk **)res);
4687 }
4688 
4689 /**
4690  * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4691  * @dev: device that is registering this clock
4692  * @hw: clk_hw associated with the clk being consumed
4693  * @con_id: connection ID string on device
4694  *
 * Managed clk_hw_get_clk(). Clocks obtained with this function are
4696  * automatically clk_put() on driver detach. See clk_put()
4697  * for more information.
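 *
 * Example (a minimal sketch; the "foo" state and the "foo-out" con_id are
 * hypothetical):
 *
 *	struct clk *clk = devm_clk_hw_get_clk(dev, &foo->hw, "foo-out");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);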
4698  */
4699 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4700 				const char *con_id)
4701 {
4702 	struct clk *clk;
4703 	struct clk **clkp;
4704 
	/*
	 * This should not happen because it would mean we have drivers
	 * passing around clk_hw pointers instead of having the caller use
	 * proper clk_get() style APIs.
	 */
4709 	WARN_ON_ONCE(dev != hw->core->dev);
4710 
4711 	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4712 	if (!clkp)
4713 		return ERR_PTR(-ENOMEM);
4714 
4715 	clk = clk_hw_get_clk(hw, con_id);
4716 	if (!IS_ERR(clk)) {
4717 		*clkp = clk;
4718 		devres_add(dev, clkp);
4719 	} else {
4720 		devres_free(clkp);
4721 	}
4722 
4723 	return clk;
4724 }
4725 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
4726 
4727 /*
4728  * clkdev helpers
4729  */
4730 
4731 void __clk_put(struct clk *clk)
4732 {
4733 	struct module *owner;
4734 
4735 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4736 		return;
4737 
4738 	clk_prepare_lock();
4739 
4740 	/*
	 * Before calling clk_put(), all calls to clk_rate_exclusive_get() from
	 * a given user should have been balanced with calls to
	 * clk_rate_exclusive_put() by that same consumer.
4744 	 */
4745 	if (WARN_ON(clk->exclusive_count)) {
4746 		/* We voiced our concern, let's sanitize the situation */
4747 		clk->core->protect_count -= (clk->exclusive_count - 1);
4748 		clk_core_rate_unprotect(clk->core);
4749 		clk->exclusive_count = 0;
4750 	}
4751 
4752 	clk_core_unlink_consumer(clk);
4753 
4754 	/* If we had any boundaries on that clock, let's drop them. */
4755 	if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
4756 		clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
4757 
4758 	clk_prepare_unlock();
4759 
4760 	owner = clk->core->owner;
4761 	kref_put(&clk->core->ref, __clk_release);
4762 	module_put(owner);
4763 	free_clk(clk);
4764 }
4765 
4766 /***        clk rate change notifiers        ***/
4767 
4768 /**
4769  * clk_notifier_register - add a clk rate change notifier
4770  * @clk: struct clk * to watch
4771  * @nb: struct notifier_block * with callback info
4772  *
4773  * Request notification when clk's rate changes.  This uses an SRCU
4774  * notifier because we want it to block and notifier unregistrations are
4775  * uncommon.  The callbacks associated with the notifier must not
4776  * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested attempt to acquire the prepare_lock mutex.
4778  *
4779  * In all notification cases (pre, post and abort rate change) the original
4780  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4781  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4782  *
4783  * clk_notifier_register() must be called from non-atomic context.
4784  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4785  * allocation failure; otherwise, passes along the return value of
4786  * srcu_notifier_chain_register().
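 *
 * Example (a minimal sketch; foo_clk_notifier_cb and the "foo" driver
 * state are hypothetical):
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate: %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	foo->nb.notifier_call = foo_clk_notifier_cb;
 *	ret = clk_notifier_register(clk, &foo->nb);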
4787  */
4788 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4789 {
4790 	struct clk_notifier *cn;
4791 	int ret = -ENOMEM;
4792 
4793 	if (!clk || !nb)
4794 		return -EINVAL;
4795 
4796 	clk_prepare_lock();
4797 
4798 	/* search the list of notifiers for this clk */
4799 	list_for_each_entry(cn, &clk_notifier_list, node)
4800 		if (cn->clk == clk)
4801 			goto found;
4802 
4803 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
4804 	cn = kzalloc_obj(*cn);
4805 	if (!cn)
4806 		goto out;
4807 
4808 	cn->clk = clk;
4809 	srcu_init_notifier_head(&cn->notifier_head);
4810 
4811 	list_add(&cn->node, &clk_notifier_list);
4812 
4813 found:
4814 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4815 
4816 	clk->core->notifier_count++;
4817 
4818 out:
4819 	clk_prepare_unlock();
4820 
4821 	return ret;
4822 }
4823 EXPORT_SYMBOL_GPL(clk_notifier_register);
4824 
4825 /**
4826  * clk_notifier_unregister - remove a clk rate change notifier
4827  * @clk: struct clk *
4828  * @nb: struct notifier_block * with callback info
4829  *
 * Requests no further notification for changes to 'clk' and frees the
 * memory allocated in clk_notifier_register().
4832  *
4833  * Returns -EINVAL if called with null arguments; otherwise, passes
4834  * along the return value of srcu_notifier_chain_unregister().
4835  */
4836 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4837 {
4838 	struct clk_notifier *cn;
4839 	int ret = -ENOENT;
4840 
4841 	if (!clk || !nb)
4842 		return -EINVAL;
4843 
4844 	clk_prepare_lock();
4845 
4846 	list_for_each_entry(cn, &clk_notifier_list, node) {
4847 		if (cn->clk == clk) {
4848 			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4849 
4850 			clk->core->notifier_count--;
4851 
4852 			/* XXX the notifier code should handle this better */
4853 			if (!cn->notifier_head.head) {
4854 				srcu_cleanup_notifier_head(&cn->notifier_head);
4855 				list_del(&cn->node);
4856 				kfree(cn);
4857 			}
4858 			break;
4859 		}
4860 	}
4861 
4862 	clk_prepare_unlock();
4863 
4864 	return ret;
4865 }
4866 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4867 
4868 struct clk_notifier_devres {
4869 	struct clk *clk;
4870 	struct notifier_block *nb;
4871 };
4872 
4873 static void devm_clk_notifier_release(struct device *dev, void *res)
4874 {
4875 	struct clk_notifier_devres *devres = res;
4876 
4877 	clk_notifier_unregister(devres->clk, devres->nb);
4878 }
4879 
4880 int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4881 			       struct notifier_block *nb)
4882 {
4883 	struct clk_notifier_devres *devres;
4884 	int ret;
4885 
4886 	devres = devres_alloc(devm_clk_notifier_release,
4887 			      sizeof(*devres), GFP_KERNEL);
4888 
4889 	if (!devres)
4890 		return -ENOMEM;
4891 
4892 	ret = clk_notifier_register(clk, nb);
4893 	if (!ret) {
4894 		devres->clk = clk;
4895 		devres->nb = nb;
4896 		devres_add(dev, devres);
4897 	} else {
4898 		devres_free(devres);
4899 	}
4900 
4901 	return ret;
4902 }
4903 EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
4904 
4905 #ifdef CONFIG_OF
4906 static void clk_core_reparent_orphans(void)
4907 {
4908 	clk_prepare_lock();
4909 	clk_core_reparent_orphans_nolock();
4910 	clk_prepare_unlock();
4911 }
4912 
4913 /**
4914  * struct of_clk_provider - Clock provider registration structure
4915  * @link: Entry in global list of clock providers
4916  * @node: Pointer to device tree node of clock provider
4917  * @get: Get clock callback.  Returns NULL or a struct clk for the
4918  *       given clock specifier
4919  * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
4920  *       struct clk_hw for the given clock specifier
4921  * @data: context pointer to be passed into @get callback
4922  */
4923 struct of_clk_provider {
4924 	struct list_head link;
4925 
4926 	struct device_node *node;
4927 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4928 	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4929 	void *data;
4930 };
4931 
4932 extern struct of_device_id __clk_of_table;
4933 static const struct of_device_id __clk_of_table_sentinel
4934 	__used __section("__clk_of_table_end");
4935 
4936 static LIST_HEAD(of_clk_providers);
4937 static DEFINE_MUTEX(of_clk_mutex);
4938 
4939 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4940 				     void *data)
4941 {
4942 	return data;
4943 }
4944 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4945 
4946 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4947 {
4948 	return data;
4949 }
4950 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4951 
4952 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4953 {
4954 	struct clk_onecell_data *clk_data = data;
4955 	unsigned int idx = clkspec->args[0];
4956 
4957 	if (idx >= clk_data->clk_num) {
4958 		pr_err("%s: invalid clock index %u\n", __func__, idx);
4959 		return ERR_PTR(-EINVAL);
4960 	}
4961 
4962 	return clk_data->clks[idx];
4963 }
4964 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4965 
4966 struct clk_hw *
4967 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4968 {
4969 	struct clk_hw_onecell_data *hw_data = data;
4970 	unsigned int idx = clkspec->args[0];
4971 
4972 	if (idx >= hw_data->num) {
4973 		pr_err("%s: invalid index %u\n", __func__, idx);
4974 		return ERR_PTR(-EINVAL);
4975 	}
4976 
4977 	return hw_data->hws[idx];
4978 }
4979 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4980 
4981 /**
4982  * of_clk_add_provider() - Register a clock provider for a node
4983  * @np: Device node pointer associated with clock provider
4984  * @clk_src_get: callback for decoding clock
4985  * @data: context pointer for @clk_src_get callback.
4986  *
4987  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4988  */
4989 int of_clk_add_provider(struct device_node *np,
4990 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4991 						   void *data),
4992 			void *data)
4993 {
4994 	struct of_clk_provider *cp;
4995 	int ret;
4996 
4997 	if (!np)
4998 		return 0;
4999 
5000 	cp = kzalloc_obj(*cp);
5001 	if (!cp)
5002 		return -ENOMEM;
5003 
5004 	cp->node = of_node_get(np);
5005 	cp->data = data;
5006 	cp->get = clk_src_get;
5007 
5008 	mutex_lock(&of_clk_mutex);
5009 	list_add(&cp->link, &of_clk_providers);
5010 	mutex_unlock(&of_clk_mutex);
5011 	pr_debug("Added clock from %pOF\n", np);
5012 
5013 	clk_core_reparent_orphans();
5014 
5015 	ret = of_clk_set_defaults(np, true);
5016 	if (ret < 0)
5017 		of_clk_del_provider(np);
5018 
5019 	fwnode_dev_initialized(&np->fwnode, true);
5020 
5021 	return ret;
5022 }
5023 EXPORT_SYMBOL_GPL(of_clk_add_provider);
5024 
5025 /**
5026  * of_clk_add_hw_provider() - Register a clock provider for a node
5027  * @np: Device node pointer associated with clock provider
5028  * @get: callback for decoding clk_hw
5029  * @data: context pointer for @get callback.
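 *
 * Example (a minimal sketch, assuming a populated
 * struct clk_hw_onecell_data *hw_data for @np):
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);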
5030  */
5031 int of_clk_add_hw_provider(struct device_node *np,
5032 			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
5033 						 void *data),
5034 			   void *data)
5035 {
5036 	struct of_clk_provider *cp;
5037 	int ret;
5038 
5039 	if (!np)
5040 		return 0;
5041 
5042 	cp = kzalloc_obj(*cp);
5043 	if (!cp)
5044 		return -ENOMEM;
5045 
5046 	cp->node = of_node_get(np);
5047 	cp->data = data;
5048 	cp->get_hw = get;
5049 
5050 	mutex_lock(&of_clk_mutex);
5051 	list_add(&cp->link, &of_clk_providers);
5052 	mutex_unlock(&of_clk_mutex);
5053 	pr_debug("Added clk_hw provider from %pOF\n", np);
5054 
5055 	clk_core_reparent_orphans();
5056 
5057 	ret = of_clk_set_defaults(np, true);
5058 	if (ret < 0)
5059 		of_clk_del_provider(np);
5060 
5061 	fwnode_dev_initialized(&np->fwnode, true);
5062 
5063 	return ret;
5064 }
5065 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
5066 
5067 static void devm_of_clk_release_provider(struct device *dev, void *res)
5068 {
5069 	of_clk_del_provider(*(struct device_node **)res);
5070 }
5071 
5072 /*
5073  * We allow a child device to use its parent device as the clock provider node
5074  * for cases like MFD sub-devices where the child device driver wants to use
5075  * devm_*() APIs but not list the device in DT as a sub-node.
5076  */
5077 static struct device_node *get_clk_provider_node(struct device *dev)
5078 {
5079 	struct device_node *np, *parent_np;
5080 
5081 	np = dev->of_node;
5082 	parent_np = dev->parent ? dev->parent->of_node : NULL;
5083 
	if (!of_property_present(np, "#clock-cells") &&
	    of_property_present(parent_np, "#clock-cells"))
		np = parent_np;
5087 
5088 	return np;
5089 }
5090 
5091 /**
5092  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
5093  * @dev: Device acting as the clock provider (used for DT node and lifetime)
5094  * @get: callback for decoding clk_hw
5095  * @data: context pointer for @get callback
5096  *
 * Registers a clock provider for the given device's node. If the device has
 * no DT node, or if the device node lacks clock provider information
 * (#clock-cells), then the parent device's node is scanned for this
 * information. If the parent node has #clock-cells, then it is used in the
 * registration. The provider is automatically released at device exit.
5102  *
5103  * Return: 0 on success or an errno on failure.
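 *
 * Example (a minimal sketch, assuming a populated
 * struct clk_hw_onecell_data *hw_data):
 *
 *	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
 *					  hw_data);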
5104  */
5105 int devm_of_clk_add_hw_provider(struct device *dev,
5106 			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
5107 					      void *data),
5108 			void *data)
5109 {
5110 	struct device_node **ptr, *np;
5111 	int ret;
5112 
5113 	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
5114 			   GFP_KERNEL);
5115 	if (!ptr)
5116 		return -ENOMEM;
5117 
5118 	np = get_clk_provider_node(dev);
5119 	ret = of_clk_add_hw_provider(np, get, data);
5120 	if (!ret) {
5121 		*ptr = np;
5122 		devres_add(dev, ptr);
5123 	} else {
5124 		devres_free(ptr);
5125 	}
5126 
5127 	return ret;
5128 }
5129 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
5130 
5131 /**
5132  * of_clk_del_provider() - Remove a previously registered clock provider
5133  * @np: Device node pointer associated with clock provider
5134  */
5135 void of_clk_del_provider(struct device_node *np)
5136 {
5137 	struct of_clk_provider *cp;
5138 
5139 	if (!np)
5140 		return;
5141 
5142 	mutex_lock(&of_clk_mutex);
5143 	list_for_each_entry(cp, &of_clk_providers, link) {
5144 		if (cp->node == np) {
5145 			list_del(&cp->link);
5146 			fwnode_dev_initialized(&np->fwnode, false);
5147 			of_node_put(cp->node);
5148 			kfree(cp);
5149 			break;
5150 		}
5151 	}
5152 	mutex_unlock(&of_clk_mutex);
5153 }
5154 EXPORT_SYMBOL_GPL(of_clk_del_provider);
5155 
5156 /**
5157  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
5158  * @np: device node to parse clock specifier from
5159  * @index: index of phandle to parse clock out of. If index < 0, @name is used
5160  * @name: clock name to find and parse. If name is NULL, the index is used
5161  * @out_args: Result of parsing the clock specifier
5162  *
5163  * Parses a device node's "clocks" and "clock-names" properties to find the
5164  * phandle and cells for the index or name that is desired. The resulting clock
5165  * specifier is placed into @out_args, or an errno is returned when there's a
5166  * parsing error. The @index argument is ignored if @name is non-NULL.
5167  *
5168  * Example:
5169  *
5170  * phandle1: clock-controller@1 {
5171  *	#clock-cells = <2>;
5172  * }
5173  *
5174  * phandle2: clock-controller@2 {
5175  *	#clock-cells = <1>;
5176  * }
5177  *
5178  * clock-consumer@3 {
5179  *	clocks = <&phandle1 1 2 &phandle2 3>;
5180  *	clock-names = "name1", "name2";
5181  * }
5182  *
 * To get a device_node for the `clock-controller@2' node, you may call this
5184  * function a few different ways:
5185  *
5186  *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
5187  *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
5188  *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
5189  *
5190  * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
5191  * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
5192  * the "clock-names" property of @np.
5193  */
5194 static int of_parse_clkspec(const struct device_node *np, int index,
5195 			    const char *name, struct of_phandle_args *out_args)
5196 {
5197 	int ret = -ENOENT;
5198 
5199 	/* Walk up the tree of devices looking for a clock property that matches */
5200 	while (np) {
5201 		/*
5202 		 * For named clocks, first look up the name in the
5203 		 * "clock-names" property.  If it cannot be found, then index
5204 		 * will be an error code and of_parse_phandle_with_args() will
5205 		 * return -EINVAL.
5206 		 */
5207 		if (name)
5208 			index = of_property_match_string(np, "clock-names", name);
5209 		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
5210 						 index, out_args);
5211 		if (!ret)
5212 			break;
5213 		if (name && index >= 0)
5214 			break;
5215 
5216 		/*
5217 		 * No matching clock found on this node.  If the parent node
5218 		 * has a "clock-ranges" property, then we can try one of its
5219 		 * clocks.
5220 		 */
5221 		np = np->parent;
5222 		if (np && !of_property_present(np, "clock-ranges"))
5223 			break;
5224 		index = 0;
5225 	}
5226 
5227 	return ret;
5228 }
5229 
5230 static struct clk_hw *
5231 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
5232 			      struct of_phandle_args *clkspec)
5233 {
5234 	struct clk *clk;
5235 
5236 	if (provider->get_hw)
5237 		return provider->get_hw(clkspec, provider->data);
5238 
5239 	clk = provider->get(clkspec, provider->data);
5240 	if (IS_ERR(clk))
5241 		return ERR_CAST(clk);
5242 	return __clk_get_hw(clk);
5243 }
5244 
5245 static struct clk_hw *
5246 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
5247 {
5248 	struct of_clk_provider *provider;
5249 	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
5250 
5251 	if (!clkspec)
5252 		return ERR_PTR(-EINVAL);
5253 
5254 	/* Check if node in clkspec is in disabled/fail state */
5255 	if (!of_device_is_available(clkspec->np))
5256 		return ERR_PTR(-ENOENT);
5257 
5258 	mutex_lock(&of_clk_mutex);
5259 	list_for_each_entry(provider, &of_clk_providers, link) {
5260 		if (provider->node == clkspec->np) {
5261 			hw = __of_clk_get_hw_from_provider(provider, clkspec);
5262 			if (!IS_ERR(hw))
5263 				break;
5264 		}
5265 	}
5266 	mutex_unlock(&of_clk_mutex);
5267 
5268 	return hw;
5269 }
5270 
5271 /**
5272  * of_clk_get_from_provider() - Lookup a clock from a clock provider
5273  * @clkspec: pointer to a clock specifier data structure
5274  *
5275  * This function looks up a struct clk from the registered list of clock
 * providers. The input is a clock specifier data structure as returned
 * by the of_parse_phandle_with_args() function call.
5278  */
5279 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
5280 {
5281 	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
5282 
5283 	return clk_hw_create_clk(NULL, hw, NULL, __func__);
5284 }
5285 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
5286 
5287 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
5288 			     const char *con_id)
5289 {
5290 	int ret;
5291 	struct clk_hw *hw;
5292 	struct of_phandle_args clkspec;
5293 
5294 	ret = of_parse_clkspec(np, index, con_id, &clkspec);
5295 	if (ret)
5296 		return ERR_PTR(ret);
5297 
5298 	hw = of_clk_get_hw_from_clkspec(&clkspec);
5299 	of_node_put(clkspec.np);
5300 
5301 	return hw;
5302 }
5303 
5304 static struct clk *__of_clk_get(struct device_node *np,
5305 				int index, const char *dev_id,
5306 				const char *con_id)
5307 {
5308 	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5309 
5310 	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5311 }
5312 
5313 struct clk *of_clk_get(struct device_node *np, int index)
5314 {
5315 	return __of_clk_get(np, index, np->full_name, NULL);
5316 }
5317 EXPORT_SYMBOL(of_clk_get);
5318 
5319 /**
5320  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5321  * @np: pointer to clock consumer node
5322  * @name: name of consumer's clock input, or NULL for the first clock reference
5323  *
5324  * This function parses the clocks and clock-names properties,
5325  * and uses them to look up the struct clk from the registered list of clock
5326  * providers.
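 *
 * Example (a minimal sketch; the "bus" clock name is hypothetical):
 *
 *	struct clk *clk = of_clk_get_by_name(np, "bus");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);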
5327  */
5328 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5329 {
5330 	if (!np)
5331 		return ERR_PTR(-ENOENT);
5332 
5333 	return __of_clk_get(np, 0, np->full_name, name);
5334 }
5335 EXPORT_SYMBOL(of_clk_get_by_name);
5336 
5337 /**
5338  * of_clk_get_parent_count() - Count the number of clocks a device node has
5339  * @np: device node to count
5340  *
5341  * Returns: The number of clocks that are possible parents of this node
5342  */
5343 unsigned int of_clk_get_parent_count(const struct device_node *np)
5344 {
5345 	int count;
5346 
5347 	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
5348 	if (count < 0)
5349 		return 0;
5350 
5351 	return count;
5352 }
5353 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
5354 
5355 const char *of_clk_get_parent_name(const struct device_node *np, int index)
5356 {
5357 	struct of_phandle_args clkspec;
5358 	const char *clk_name;
5359 	bool found = false;
5360 	u32 pv;
5361 	int rc;
5362 	int count;
5363 	struct clk *clk;
5364 
5365 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
5366 					&clkspec);
5367 	if (rc)
5368 		return NULL;
5369 
5370 	index = clkspec.args_count ? clkspec.args[0] : 0;
5371 	count = 0;
5372 
	/*
	 * If there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
5376 	of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
5377 		if (index == pv) {
5378 			index = count;
5379 			found = true;
5380 			break;
5381 		}
5382 		count++;
5383 	}
5384 	/* We went off the end of 'clock-indices' without finding it */
5385 	if (of_property_present(clkspec.np, "clock-indices") && !found) {
5386 		of_node_put(clkspec.np);
5387 		return NULL;
5388 	}
5389 
5390 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
5391 					  index,
5392 					  &clk_name) < 0) {
5393 		/*
5394 		 * Best effort to get the name if the clock has been
5395 		 * registered with the framework. If the clock isn't
5396 		 * registered, we return the node name as the name of
5397 		 * the clock as long as #clock-cells = 0.
5398 		 */
5399 		clk = of_clk_get_from_provider(&clkspec);
5400 		if (IS_ERR(clk)) {
5401 			if (clkspec.args_count == 0)
5402 				clk_name = clkspec.np->name;
5403 			else
5404 				clk_name = NULL;
5405 		} else {
5406 			clk_name = __clk_get_name(clk);
5407 			clk_put(clk);
5408 		}
5409 	}
5410 
	of_node_put(clkspec.np);
5413 	return clk_name;
5414 }
5415 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
5416 
5417 /**
5418  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
5419  * number of parents
5420  * @np: Device node pointer associated with clock provider
5421  * @parents: pointer to char array that hold the parents' names
5422  * @size: size of the @parents array
5423  *
5424  * Return: number of parents for the clock node.
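 *
 * Example (a minimal sketch):
 *
 *	const char *parents[4];
 *	int num_parents = of_clk_parent_fill(np, parents,
 *					     ARRAY_SIZE(parents));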
5425  */
5426 int of_clk_parent_fill(struct device_node *np, const char **parents,
5427 		       unsigned int size)
5428 {
5429 	unsigned int i = 0;
5430 
5431 	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5432 		i++;
5433 
5434 	return i;
5435 }
5436 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
5437 
5438 struct clock_provider {
5439 	void (*clk_init_cb)(struct device_node *);
5440 	struct device_node *np;
5441 	struct list_head node;
5442 };
5443 
5444 /*
5445  * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
5448  */
5449 static int parent_ready(struct device_node *np)
5450 {
5451 	int i = 0;
5452 
5453 	while (true) {
5454 		struct clk *clk = of_clk_get(np, i);
5455 
		/* this parent is ready, we can check the next one */
5457 		if (!IS_ERR(clk)) {
5458 			clk_put(clk);
5459 			i++;
5460 			continue;
5461 		}
5462 
5463 		/* at least one parent is not ready, we exit now */
5464 		if (PTR_ERR(clk) == -EPROBE_DEFER)
5465 			return 0;
5466 
5467 		/*
		 * Here we make the assumption that the device tree is
		 * written correctly. So an error means that there are
		 * no more parents. As we didn't exit yet, the previous
		 * parents are ready. If there is no clock parent at
		 * all, there is nothing to wait for, so we can
		 * consider its absence as being ready.
5474 		 */
5475 		return 1;
5476 	}
5477 }
5478 
5479 /**
5480  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5481  * @np: Device node pointer associated with clock provider
5482  * @index: clock index
5483  * @flags: pointer to top-level framework flags
5484  *
5485  * Detects if the clock-critical property exists and, if so, sets the
5486  * corresponding CLK_IS_CRITICAL flag.
5487  *
5488  * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style.
5490  * Those bindings typically put all clock data into .dts and the Linux
5491  * driver has no clock data, thus making it impossible to set this flag
5492  * correctly from the driver. Only those drivers may call
5493  * of_clk_detect_critical from their setup functions.
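 *
 * Example (a sketch of the kind of legacy binding this helper parses;
 * the node and the indices are hypothetical):
 *
 * clock-controller@4 {
 *	#clock-cells = <1>;
 *	clock-critical = <0>, <2>;
 * }
 *
 * A provider's setup function would then call something like:
 *
 *	of_clk_detect_critical(np, index, &init.flags);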
5494  *
5495  * Return: error code or zero on success
5496  */
5497 int of_clk_detect_critical(struct device_node *np, int index,
5498 			   unsigned long *flags)
5499 {
5500 	uint32_t idx;
5501 
5502 	if (!np || !flags)
5503 		return -EINVAL;
5504 
5505 	of_property_for_each_u32(np, "clock-critical", idx)
5506 		if (index == idx)
5507 			*flags |= CLK_IS_CRITICAL;
5508 
5509 	return 0;
5510 }
5511 
5512 /**
5513  * of_clk_init() - Scan and init clock providers from the DT
5514  * @matches: array of compatible values and init functions for providers.
5515  *
5516  * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to follow the
 * dependencies between providers.
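 *
 * Example (a minimal sketch; the "foo" provider and its compatible
 * string are hypothetical):
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_init);
 *
 * Providers declared with CLK_OF_DECLARE() end up in __clk_of_table,
 * which is the default @matches argument.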
5519  */
5520 void __init of_clk_init(const struct of_device_id *matches)
5521 {
5522 	const struct of_device_id *match;
5523 	struct device_node *np;
5524 	struct clock_provider *clk_provider, *next;
5525 	bool is_init_done;
5526 	bool force = false;
5527 	LIST_HEAD(clk_provider_list);
5528 
5529 	if (!matches)
5530 		matches = &__clk_of_table;
5531 
	/* First prepare the list of the clock providers */
5533 	for_each_matching_node_and_match(np, matches, &match) {
5534 		struct clock_provider *parent;
5535 
5536 		if (!of_device_is_available(np))
5537 			continue;
5538 
5539 		parent = kzalloc_obj(*parent);
5540 		if (!parent) {
5541 			list_for_each_entry_safe(clk_provider, next,
5542 						 &clk_provider_list, node) {
5543 				list_del(&clk_provider->node);
5544 				of_node_put(clk_provider->np);
5545 				kfree(clk_provider);
5546 			}
5547 			of_node_put(np);
5548 			return;
5549 		}
5550 
5551 		parent->clk_init_cb = match->data;
5552 		parent->np = of_node_get(np);
5553 		list_add_tail(&parent->node, &clk_provider_list);
5554 	}
5555 
5556 	while (!list_empty(&clk_provider_list)) {
5557 		is_init_done = false;
5558 		list_for_each_entry_safe(clk_provider, next,
5559 					&clk_provider_list, node) {
5560 			if (force || parent_ready(clk_provider->np)) {
5561 
5562 				/* Don't populate platform devices */
5563 				of_node_set_flag(clk_provider->np,
5564 						 OF_POPULATED);
5565 
5566 				clk_provider->clk_init_cb(clk_provider->np);
5567 				of_clk_set_defaults(clk_provider->np, true);
5568 
5569 				list_del(&clk_provider->node);
5570 				of_node_put(clk_provider->np);
5571 				kfree(clk_provider);
5572 				is_init_done = true;
5573 			}
5574 		}
5575 
5576 		/*
5577 		 * We didn't manage to initialize any of the
5578 		 * remaining providers during the last loop, so now we
5579 		 * initialize all the remaining ones unconditionally
5580 		 * in case the clock parent was not mandatory
		 * in case the clock parent was not mandatory.
5582 		if (!is_init_done)
5583 			force = true;
5584 	}
5585 }
5586 #endif
5587