Lines Matching full:ccu

16  * CCU.  (I believe these policies are named "Deep Sleep", "Economy",
119 /* CCU access */
121 /* Read a 32-bit register value from a CCU's address space. */
122 static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset) in __ccu_read() argument
124 return readl(ccu->base + reg_offset); in __ccu_read()
127 /* Write a 32-bit register value into a CCU's address space. */
129 __ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val) in __ccu_write() argument
131 writel(reg_val, ccu->base + reg_offset); in __ccu_write()
134 static inline unsigned long ccu_lock(struct ccu_data *ccu) in ccu_lock() argument
138 spin_lock_irqsave(&ccu->lock, flags); in ccu_lock()
142 static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags) in ccu_unlock() argument
144 spin_unlock_irqrestore(&ccu->lock, flags); in ccu_unlock()
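
Taken together, the fragments above give the complete CCU register-access pattern: plain readl()/writel() against a mapped base address, bracketed by an IRQ-safe spinlock. Below is a minimal sketch of a caller combining them; the struct ccu_data fields (base, lock, name, write_enabled) come from the lines shown, while the helper itself and any omitted members are assumptions.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch only: the driver's real struct ccu_data has more members. */
struct ccu_data {
        void __iomem *base;     /* mapped CCU register space */
        spinlock_t lock;        /* serializes all CCU register access */
        bool write_enabled;     /* password-protected writes enabled */
        const char *name;       /* CCU name, used in diagnostics */
};

/* Hypothetical helper: set bits in a CCU register under the lock. */
static u32 ccu_set_bits_example(struct ccu_data *ccu, u32 offset, u32 bits)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&ccu->lock, flags);           /* ccu_lock() */
        val = readl(ccu->base + offset);                /* __ccu_read() */
        writel(val | bits, ccu->base + offset);         /* __ccu_write() */
        spin_unlock_irqrestore(&ccu->lock, flags);      /* ccu_unlock() */

        return val;
}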
148 * Enable/disable write access to CCU protected registers. The
151 static inline void __ccu_write_enable(struct ccu_data *ccu) in __ccu_write_enable() argument
153 if (ccu->write_enabled) { in __ccu_write_enable()
155 ccu->name); in __ccu_write_enable()
158 ccu->write_enabled = true; in __ccu_write_enable()
159 __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1); in __ccu_write_enable()
162 static inline void __ccu_write_disable(struct ccu_data *ccu) in __ccu_write_disable() argument
164 if (!ccu->write_enabled) { in __ccu_write_disable()
166 ccu->name); in __ccu_write_disable()
170 __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD); in __ccu_write_disable()
171 ccu->write_enabled = false; in __ccu_write_disable()
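
The enable/disable pair above implements the CCU's register write protection: offset 0 holds a write-access register that must be written with a password value plus bit 0 set before other registers accept writes, and rewritten with the bare password to re-protect. The clk_gate() and divider_write() fragments further down follow exactly this shape. A hedged sketch of the sequence (the password value here is a placeholder, not taken from this listing):

#define CCU_ACCESS_PASSWORD     0xA5A500        /* assumed value */

/* Hypothetical protected update: unlock, store, re-protect. */
static void ccu_protected_write_example(struct ccu_data *ccu,
                                        u32 offset, u32 val)
{
        unsigned long flags = ccu_lock(ccu);

        __ccu_write_enable(ccu);        /* offset 0 <- PASSWORD | 1 */
        __ccu_write(ccu, offset, val);
        __ccu_write_disable(ccu);       /* offset 0 <- PASSWORD, bit 0 clear */

        ccu_unlock(ccu, flags);
}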
175 * Poll a register in a CCU's address space, returning when the
180 * Caller must ensure the CCU lock is held.
183 __ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want) in __ccu_wait_bit() argument
192 val = __ccu_read(ccu, reg_offset); in __ccu_wait_bit()
199 ccu->name, reg_offset, bit, want ? "set" : "clear"); in __ccu_wait_bit()
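
Only a few lines of __ccu_wait_bit() match the search, but the shape of the loop is clear from them: poll the register until the named bit reaches the wanted state, and warn on timeout. A hedged reconstruction, where the retry limit and per-iteration delay are assumptions:

/* Needs <linux/delay.h> for udelay(). Caller holds the CCU lock. */
static bool ccu_wait_bit_example(struct ccu_data *ccu, u32 reg_offset,
                                 u32 bit, bool want)
{
        u32 bit_mask = 1 << bit;
        unsigned int tries;

        for (tries = 0; tries < 1000; tries++) {        /* assumed limit */
                u32 val = __ccu_read(ccu, reg_offset);

                if (!!(val & bit_mask) == want)
                        return true;
                udelay(1);                              /* assumed delay */
        }
        pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
                ccu->name, reg_offset, bit, want ? "set" : "clear");

        return false;
}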
206 static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync) in __ccu_policy_engine_start() argument
208 struct bcm_policy_ctl *control = &ccu->policy.control; in __ccu_policy_engine_start()
214 /* If we don't need to control policy for this CCU, we're done. */ in __ccu_policy_engine_start()
222 ret = __ccu_wait_bit(ccu, offset, go_bit, false); in __ccu_policy_engine_start()
224 pr_err("%s: ccu %s policy engine wouldn't go idle\n", in __ccu_policy_engine_start()
225 __func__, ccu->name); in __ccu_policy_engine_start()
237 * load is copied to the active load and the CCU frequency in __ccu_policy_engine_start()
249 __ccu_write(ccu, offset, mask); in __ccu_policy_engine_start()
252 ret = __ccu_wait_bit(ccu, offset, go_bit, false); in __ccu_policy_engine_start()
254 pr_err("%s: ccu %s policy engine never started\n", in __ccu_policy_engine_start()
255 __func__, ccu->name); in __ccu_policy_engine_start()
260 static bool __ccu_policy_engine_stop(struct ccu_data *ccu) in __ccu_policy_engine_stop() argument
262 struct bcm_lvm_en *enable = &ccu->policy.enable; in __ccu_policy_engine_stop()
267 /* If we don't need to control policy for this CCU, we're done. */ in __ccu_policy_engine_stop()
274 ret = __ccu_wait_bit(ccu, offset, enable_bit, false); in __ccu_policy_engine_stop()
276 pr_err("%s: ccu %s policy engine already stopped\n", in __ccu_policy_engine_stop()
277 __func__, ccu->name); in __ccu_policy_engine_stop()
282 __ccu_write(ccu, offset, (u32)1 << enable_bit); in __ccu_policy_engine_stop()
285 ret = __ccu_wait_bit(ccu, offset, enable_bit, false); in __ccu_policy_engine_stop()
287 pr_err("%s: ccu %s policy engine never stopped\n", in __ccu_policy_engine_stop()
288 __func__, ccu->name); in __ccu_policy_engine_stop()
294 * A CCU has four operating conditions ("policies"), and some clocks
302 static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy) in policy_init() argument
313 * We need to stop the CCU policy engine to allow update in policy_init()
316 if (!__ccu_policy_engine_stop(ccu)) { in policy_init()
317 pr_err("%s: unable to stop CCU %s policy engine\n", in policy_init()
318 __func__, ccu->name); in policy_init()
331 reg_val = __ccu_read(ccu, offset); in policy_init()
333 __ccu_write(ccu, offset, reg_val); in policy_init()
338 ret = __ccu_policy_engine_start(ccu, true); in policy_init()
340 pr_err("%s: unable to restart CCU %s policy engine\n", in policy_init()
341 __func__, ccu->name); in policy_init()
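
policy_init() ties the two engine routines together: the policy engine must be stopped before the per-policy enable masks are rewritten, then restarted so the new masks take effect (the "sync" argument appears to request a synchronized restart). A condensed sketch of that sequence, treating the register offset and bit position as placeholders:

/* Hypothetical update of one clock's enable bit in a policy mask. */
static bool ccu_policy_update_example(struct ccu_data *ccu,
                                      u32 offset, u32 bit)
{
        u32 reg_val;

        if (!__ccu_policy_engine_stop(ccu))
                return false;   /* engine must be idle for the update */

        reg_val = __ccu_read(ccu, offset);
        reg_val |= 1 << bit;    /* permit this clock under the policy */
        __ccu_write(ccu, offset, reg_val);

        /* Restart with sync so the updated mask is applied. */
        return __ccu_policy_engine_start(ccu, true);
}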
348 /* Determine whether a clock is gated. CCU lock must be held. */
350 __is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) in __is_clk_gate_enabled() argument
360 reg_val = __ccu_read(ccu, gate->offset); in __is_clk_gate_enabled()
367 is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) in is_clk_gate_enabled() argument
376 flags = ccu_lock(ccu); in is_clk_gate_enabled()
377 ret = __is_clk_gate_enabled(ccu, gate); in is_clk_gate_enabled()
378 ccu_unlock(ccu, flags); in is_clk_gate_enabled()
388 __gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate) in __gate_commit() argument
398 reg_val = __ccu_read(ccu, gate->offset); in __gate_commit()
423 __ccu_write(ccu, gate->offset, reg_val); in __gate_commit()
430 return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); in __gate_commit()
439 static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate) in gate_init() argument
443 return __gate_commit(ccu, gate); in gate_init()
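
__gate_commit() follows a read-modify-write-then-verify idiom: update the gate's enable bit, write the register back, then poll a separate status bit until the hardware reports the requested state. A sketch with the bit positions passed in explicitly, since only gate->offset and gate->status_bit are visible in the fragments:

/* Hypothetical gate commit: write desired state, verify status bit. */
static bool gate_commit_example(struct ccu_data *ccu, u32 offset,
                                u32 en_bit, u32 status_bit, bool enable)
{
        u32 reg_val = __ccu_read(ccu, offset);

        if (enable)
                reg_val |= 1 << en_bit;
        else
                reg_val &= ~(1 << en_bit);
        __ccu_write(ccu, offset, reg_val);

        /* The hardware reflects the actual gate state on status_bit. */
        return __ccu_wait_bit(ccu, offset, status_bit, enable);
}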
450 * otherwise. CCU lock must be held.
453 __clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) in __clk_gate() argument
470 ret = __gate_commit(ccu, gate); in __clk_gate()
478 static int clk_gate(struct ccu_data *ccu, const char *name, in clk_gate() argument
493 flags = ccu_lock(ccu); in clk_gate()
494 __ccu_write_enable(ccu); in clk_gate()
496 success = __clk_gate(ccu, gate, enable); in clk_gate()
498 __ccu_write_disable(ccu); in clk_gate()
499 ccu_unlock(ccu, flags); in clk_gate()
519 static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst) in hyst_init() argument
532 reg_val = __ccu_read(ccu, offset); in hyst_init()
534 __ccu_write(ccu, offset, reg_val); in hyst_init()
542 * Caller must ensure CCU lock is held and access is enabled.
545 static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig) in __clk_trigger() argument
548 __ccu_write(ccu, trig->offset, 1 << trig->bit); in __clk_trigger()
550 return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); in __clk_trigger()
556 static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) in divider_read_scaled() argument
565 flags = ccu_lock(ccu); in divider_read_scaled()
566 reg_val = __ccu_read(ccu, div->u.s.offset); in divider_read_scaled()
567 ccu_unlock(ccu, flags); in divider_read_scaled()
583 static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, in __div_commit() argument
599 reg_val = __ccu_read(ccu, div->u.s.offset); in __div_commit()
611 enabled = __is_clk_gate_enabled(ccu, gate); in __div_commit()
612 if (!enabled && !__clk_gate(ccu, gate, true)) { in __div_commit()
618 reg_val = __ccu_read(ccu, div->u.s.offset); in __div_commit()
621 __ccu_write(ccu, div->u.s.offset, reg_val); in __div_commit()
624 if (!__clk_trigger(ccu, trig)) in __div_commit()
628 if (!enabled && !__clk_gate(ccu, gate, false)) in __div_commit()
639 static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, in div_init() argument
644 return !__div_commit(ccu, gate, div, trig); in div_init()
647 static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, in divider_write() argument
663 flags = ccu_lock(ccu); in divider_write()
664 __ccu_write_enable(ccu); in divider_write()
666 ret = __div_commit(ccu, gate, div, trig); in divider_write()
668 __ccu_write_disable(ccu); in divider_write()
669 ccu_unlock(ccu, flags); in divider_write()
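
The __div_commit() fragments reveal an ordering constraint: a new divider value only latches through the trigger while the clock's gate is enabled, so the commit temporarily enables a disabled gate, fires the trigger, and then restores the gate. A hedged skeleton of that flow (error codes and the divider bit-field manipulation are assumptions; field names such as u.s.offset come from the fragments):

/* Hypothetical divider commit skeleton around a trigger. */
static int div_commit_example(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                              struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        bool enabled = __is_clk_gate_enabled(ccu, gate);
        u32 reg_val;
        int ret = 0;

        /* The trigger only latches shadowed values while the gate is on. */
        if (!enabled && !__clk_gate(ccu, gate, true))
                return -ENXIO;

        reg_val = __ccu_read(ccu, div->u.s.offset);
        /* ... insert the new divider value into its bit-field here ... */
        __ccu_write(ccu, div->u.s.offset, reg_val);

        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Put the gate back into its original (disabled) state. */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;

        return ret;
}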
685 static unsigned long clk_recalc_rate(struct ccu_data *ccu, in clk_recalc_rate() argument
713 scaled_div = divider_read_scaled(ccu, pre_div); in clk_recalc_rate()
725 scaled_div = divider_read_scaled(ccu, div); in clk_recalc_rate()
740 static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, in round_rate() argument
772 scaled_pre_div = divider_read_scaled(ccu, pre_div); in round_rate()
794 best_scaled_div = divider_read_scaled(ccu, div); in round_rate()
832 static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) in selector_read_index() argument
844 flags = ccu_lock(ccu); in selector_read_index()
845 reg_val = __ccu_read(ccu, sel->offset); in selector_read_index()
846 ccu_unlock(ccu, flags); in selector_read_index()
854 __func__, parent_sel, ccu->name, sel->offset); in selector_read_index()
866 __sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, in __sel_commit() argument
884 reg_val = __ccu_read(ccu, sel->offset); in __sel_commit()
898 enabled = __is_clk_gate_enabled(ccu, gate); in __sel_commit()
899 if (!enabled && !__clk_gate(ccu, gate, true)) in __sel_commit()
903 reg_val = __ccu_read(ccu, sel->offset); in __sel_commit()
905 __ccu_write(ccu, sel->offset, reg_val); in __sel_commit()
908 if (!__clk_trigger(ccu, trig)) in __sel_commit()
912 if (!enabled && !__clk_gate(ccu, gate, false)) in __sel_commit()
923 static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, in sel_init() argument
928 return !__sel_commit(ccu, gate, sel, trig); in sel_init()
936 static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, in selector_write() argument
950 flags = ccu_lock(ccu); in selector_write()
951 __ccu_write_enable(ccu); in selector_write()
953 ret = __sel_commit(ccu, gate, sel, trig); in selector_write()
955 __ccu_write_disable(ccu); in selector_write()
956 ccu_unlock(ccu, flags); in selector_write()
971 return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true); in kona_peri_clk_enable()
979 (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false); in kona_peri_clk_disable()
987 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; in kona_peri_clk_is_enabled()
996 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, in kona_peri_clk_recalc_rate()
1010 return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, in kona_peri_clk_round_rate()
1096 ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); in kona_peri_clk_set_parent()
1116 index = selector_read_index(bcm_clk->ccu, &data->sel); in kona_peri_clk_get_parent()
1153 (void)round_rate(bcm_clk->ccu, div, &data->pre_div, in kona_peri_clk_set_rate()
1160 ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, in kona_peri_clk_set_rate()
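
The kona_peri_clk_* fragments above are callbacks for the common clock framework; a driver like this wires them into a struct clk_ops table along these lines (a sketch built from the visible function names, not a copy of the driver's actual table):

/* Needs <linux/clk-provider.h>. */
static const struct clk_ops example_peri_clk_ops = {
        .enable         = kona_peri_clk_enable,
        .disable        = kona_peri_clk_disable,
        .is_enabled     = kona_peri_clk_is_enabled,
        .recalc_rate    = kona_peri_clk_recalc_rate,
        .round_rate     = kona_peri_clk_round_rate,
        .set_parent     = kona_peri_clk_set_parent,
        .get_parent     = kona_peri_clk_get_parent,
        .set_rate       = kona_peri_clk_set_rate,
};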
1188 struct ccu_data *ccu = bcm_clk->ccu; in __peri_clk_init() local
1195 if (!policy_init(ccu, &peri->policy)) { in __peri_clk_init()
1200 if (!gate_init(ccu, &peri->gate)) { in __peri_clk_init()
1204 if (!hyst_init(ccu, &peri->hyst)) { in __peri_clk_init()
1208 if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { in __peri_clk_init()
1221 if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { in __peri_clk_init()
1227 if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { in __peri_clk_init()
1247 /* Set a CCU and all its clocks into their desired initial state */
1248 bool __init kona_ccu_init(struct ccu_data *ccu) in kona_ccu_init() argument
1252 struct kona_clk *kona_clks = ccu->kona_clks; in kona_ccu_init()
1255 flags = ccu_lock(ccu); in kona_ccu_init()
1256 __ccu_write_enable(ccu); in kona_ccu_init()
1258 for (which = 0; which < ccu->clk_num; which++) { in kona_ccu_init()
1261 if (!bcm_clk->ccu) in kona_ccu_init()
1267 __ccu_write_disable(ccu); in kona_ccu_init()
1268 ccu_unlock(ccu, flags); in kona_ccu_init()
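
kona_ccu_init() wraps the whole initialization in a single lock/write-enable window and walks the CCU's clock table; entries whose ccu pointer is null are unused slots and get skipped. A sketch of the loop consistent with the fragments, with the per-clock initializer left as an assumed helper since it does not appear in the matches:

/* Hypothetical reconstruction of the kona_ccu_init() flow. */
bool __init kona_ccu_init_example(struct ccu_data *ccu)
{
        struct kona_clk *kona_clks = ccu->kona_clks;
        unsigned long flags;
        unsigned int which;
        bool success = true;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        for (which = 0; which < ccu->clk_num; which++) {
                struct kona_clk *bcm_clk = &kona_clks[which];

                if (!bcm_clk->ccu)
                        continue;               /* unused table slot */

                success &= peri_clk_init_example(bcm_clk);  /* assumed helper */
        }

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        return success;
}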