// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "clk.h"

#define div_mask(width)	((1 << (width)) - 1)

static bool _is_best_half_div(unsigned long rate, unsigned long now,
			      unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

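/*
 * A divider field value of n selects a divisor of n + 1.5, so the output
 * rate is parent_rate / (n + 1.5).  The math is done as
 * 2 * parent_rate / (2 * n + 3) to stay in integer arithmetic.
 */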
static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= div_mask(divider->width);
	val = val * 2 + 3;

	return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
}

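/*
 * Find the divider field value whose rate comes closest to the requested
 * rate.  When the parent rate is fixed (no CLK_SET_RATE_PARENT), the field
 * is derived directly from parent_rate / rate and clamped to the field
 * width; otherwise every candidate divider is tried and the parent is
 * asked, via clk_hw_round_rate(), for the rate that would make the
 * division exact.
 */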
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}

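/*
 * Pick the best divider for the request and report the rate that divider
 * will actually produce with the (possibly re-rounded) parent rate.
 */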
static int clk_half_divider_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int div;

	div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate,
				       divider->width,
				       divider->flags);

	req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3);

	return 0;
}

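/*
 * Program the divider: round 2 * parent_rate / rate up, convert back to
 * the register encoding ((value - 3) / 2) and clamp to the field width.
 * CLK_DIVIDER_HIWORD_MASK registers take a write-enable mask in the upper
 * 16 bits, so no read-modify-write is needed for them.
 */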
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = (value - 3) / 2;
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

static const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.determine_rate = clk_half_divider_determine_rate,
	.set_rate = clk_half_divider_set_rate,
};

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
struct clk *rockchip_clk_register_halfdiv(const char *name,
					  const char *const *parent_names,
					  u8 num_parents, void __iomem *base,
					  int muxdiv_offset, u8 mux_shift,
					  u8 mux_width, u8 mux_flags,
					  u8 div_shift, u8 div_width,
					  u8 div_flags, int gate_offset,
					  u8 gate_shift, u8 gate_flags,
					  unsigned long flags,
					  spinlock_t *lock)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM);
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div_ops = &clk_half_divider_ops;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux ? &mux->hw : NULL, mux_ops,
				       div ? &div->hw : NULL, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags);
	if (IS_ERR(hw))
		goto err_div;

	return hw->clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_CAST(hw);
}