// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/string_choices.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU.  (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".)  A lower policy number has lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT	4

#define CCU_ACCESS_PASSWORD      0xA5A500
#define CLK_GATE_DELAY_LOOP      2000
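
/*
 * With udelay(1) between polls, CLK_GATE_DELAY_LOOP bounds each
 * __ccu_wait_bit() call below at roughly 2 ms of busy-waiting.
 */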

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}
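
/*
 * For example, bitfield_mask(4, 3) is 0x00000070; extracting that
 * field with bitfield_extract(0x12345678, 4, 3) yields 0x7, and
 * bitfield_replace(0x12345678, 4, 3, 0x2) yields 0x12345628.
 */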

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}
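
/*
 * The scaling scheme is fixed-point: a divider field with frac_width
 * fraction bits stores the divisor scaled by 2^frac_width, offset by
 * 2^frac_width.  For instance, with frac_width 2 a register value of
 * 5 represents the scaled divisor 5 + 4 = 9, i.e. division by
 * 9/4 = 2.25, and scale_rate() shifts the rate left by those same 2
 * bits so the division comes out in ordinary Hz.
 */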

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers.  The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
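
/*
 * A sketch of the canonical sequence for modifying protected CCU
 * registers (clk_gate() below is a real instance of this pattern):
 *
 *	flags = ccu_lock(ccu);
 *	__ccu_write_enable(ccu);
 *
 *	... read, modify and write CCU registers ...
 *
 *	__ccu_write_disable(ccu);
 *	ccu_unlock(ccu, flags);
 */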

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear).  Delay
 * a microsecond after each read of the register.  Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}

/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning.  To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched.  We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
			__func__, ccu->name);

	return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure no stop request is already pending before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect.  Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not.  The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}
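
/*
 * For example (offset and bit number hypothetical): a clock whose
 * policy mask registers start at 0x0010 with policy bit 9 gets bit 9
 * set in each of the four registers at 0x0010, 0x0014, 0x0018 and
 * 0x001c, enabling it under all four policies.
 */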

/* Gate operations */

/* Determine whether a clock is gated.  CCU lock must be held.  */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
			!gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}
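
/*
 * Example (bit positions hypothetical): for a software-managed gate
 * with hw_sw_sel_bit 3, en_bit 1 and status_bit 0, committing the
 * enabled state sets bits 3 and 1 in the gate register, then polls
 * bit 0 until the hardware reports the gate is actually on.
 */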

/*
 * Initialize a gate.  Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state.  Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state.  Returns true if successful, false
 * otherwise.  CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}

/* Enable or disable a gate.  Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can.  We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		str_enable_disable(enable), name);

	return -EIO;
}

/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined.  The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high).  For now, if hysteresis
 * is defined for a clock, we enable it and select the high delay.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
	u32 offset;
	u32 reg_val;
	u32 mask;

	if (!hyst_exists(hyst))
		return true;

	offset = hyst->offset;
	mask = (u32)1 << hyst->en_bit;
	mask |= (u32)1 << hyst->val_bit;

	reg_val = __ccu_read(ccu, offset);
	reg_val |= mask;
	__ccu_write(ccu, offset, reg_val);

	return true;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}
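
/*
 * The trigger bit is written as 1 and then polled until it reads
 * back as 0, the hardware apparently clearing it once a newly
 * written divider or selector value has been taken into use.
 */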

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;		/* Revert the change */

	return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider.  The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
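
/*
 * Worked example (all values hypothetical): with a 27 MHz parent, a
 * pre-divider with frac_width 1 whose scaled divisor reads 3 (i.e.
 * divide by 1.5), and a divider with frac_width 2 whose scaled
 * divisor reads 9 (i.e. divide by 2.25):
 *
 *	scaled rate   = 27000000 << (1 + 2) = 216000000
 *	after pre-div = 216000000 / 3       =  72000000
 *	result        =  72000000 / 9       =   8000000
 *
 * which matches 27 MHz / (1.5 * 2.25) = 8 MHz.
 */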

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers.  The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent.  It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
				struct bcm_clk_div *pre_div,
				unsigned long rate, unsigned long parent_rate,
				u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;
	return BAD_CLK_INDEX;
}
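
/*
 * For example, with parent_sel[] = { 0, 2, 3 }, a selector field
 * reading 2 maps to parent array index 1, while a reading of 1
 * yields BAD_CLK_INDEX.
 */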

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock.  Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return clk_hw_get_rate(hw);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk_hw *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	long rate;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note:  We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2) {
		rate = kona_peri_clk_round_rate(hw, req->rate,
						&req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	/* Unless we can do better, stick with current parent */
	current_parent = clk_hw_get_parent(hw);
	parent_rate = clk_hw_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
	best_delta = abs(best_rate - req->rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = clk_hw_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, req->rate,
						      &parent_rate);
		delta = abs(other_rate - req->rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			req->best_parent_hw = parent;
			req->best_parent_rate = parent_rate;
		}
	}

	req->rate = best_rate;
	return 0;
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
					       : &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == clk_hw_get_rate(hw))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed.  (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.)  Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(&data->div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__,
			bcm_clk->init_data.name);
	}

	return ret;
}

struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.determine_rate = kona_peri_clk_determine_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};
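
/*
 * A minimal registration sketch (assuming a struct kona_clk whose
 * init_data has already been filled in, as done by the companion
 * setup code):
 *
 *	bcm_clk->init_data.ops = &kona_peri_clk_ops;
 *	bcm_clk->hw.init = &bcm_clk->init_data;
 *	err = clk_hw_register(NULL, &bcm_clk->hw);
 */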

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->init_data.name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!policy_init(ccu, &peri->policy)) {
		pr_err("%s: error initializing policy for %s\n",
			__func__, name);
		return false;
	}
	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!hyst_init(ccu, &peri->hyst)) {
		pr_err("%s: error initializing hyst for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
					       : &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct kona_clk *kona_clks = ccu->kona_clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->clk_num; which++) {
		struct kona_clk *bcm_clk = &kona_clks[which];

		if (!bcm_clk->ccu)
			continue;

		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}