1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Clock driver for Loongson-1 SoC
4 *
5 * Copyright (C) 2012-2023 Keguang Zhang <keguang.zhang@gmail.com>
6 */
7
8 #include <linux/bits.h>
9 #include <linux/clk-provider.h>
10 #include <linux/container_of.h>
11 #include <linux/io.h>
12 #include <linux/of_address.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/printk.h>
16
17 #include <dt-bindings/clock/loongson,ls1x-clk.h>
18
/* Loongson 1 Clock Register Definitions */
#define CLK_PLL_FREQ 0x0	/* PLL multiplier configuration register */
#define CLK_PLL_DIV 0x4		/* PLL output divider configuration register */

/* Serializes read-modify-write sequences on the shared divider registers */
static DEFINE_SPINLOCK(ls1x_clk_div_lock);
24
/*
 * PLL configuration: where the multiplier fields live in the PLL
 * frequency register and how to combine them into a rate.
 */
struct ls1x_clk_pll_data {
	u32 fixed;	/* constant added to the multiplier read from hardware */
	u8 shift;	/* right shift applied to the final computed rate */
	u8 int_shift;	/* bit position of the integer multiplier field */
	u8 int_width;	/* width of the integer multiplier field */
	u8 frac_shift;	/* bit position of the fractional multiplier field */
	u8 frac_width;	/* width of the fractional field (0 = field absent) */
};
33
/*
 * Divider configuration: layout of the divider field and its bypass
 * control bit within the divider register.
 */
struct ls1x_clk_div_data {
	u8 shift;		/* bit position of the divider value field */
	u8 width;		/* width of the divider value field */
	unsigned long flags;	/* CLK_DIVIDER_* flags for the common code */
	const struct clk_div_table *table;	/* optional val<->div table */
	u8 bypass_shift;	/* bit position of the bypass control bit */
	u8 bypass_inv;		/* non-zero: bypass bit is active-low */
	spinlock_t *lock; /* protect access to DIV registers */
};
43
/* One clock instance: a clk_hw plus its register and type-specific data */
struct ls1x_clk {
	void __iomem *reg;	/* mapped register (base + offset), set at init */
	unsigned int offset;	/* register offset from the block base */
	struct clk_hw hw;
	const void *data;	/* ls1x_clk_pll_data or ls1x_clk_div_data */
};
50
51 #define to_ls1x_clk(_hw) container_of(_hw, struct ls1x_clk, hw)
52
/*
 * Extract one multiplier field from the raw PLL register value.
 *
 * NOTE(review): GENMASK(shift + width, shift) spans width + 1 bits, one
 * more than @width suggests — presumably deliberate to match the
 * hardware field layout; confirm against the LS1B/LS1C datasheets.
 */
static inline unsigned long ls1x_pll_rate_part(unsigned int val,
					       unsigned int shift,
					       unsigned int width)
{
	return (val & GENMASK(shift + width, shift)) >> shift;
}
59
ls1x_pll_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)60 static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
61 unsigned long parent_rate)
62 {
63 struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
64 const struct ls1x_clk_pll_data *d = ls1x_clk->data;
65 u32 val, rate;
66
67 val = readl(ls1x_clk->reg);
68 rate = d->fixed;
69 rate += ls1x_pll_rate_part(val, d->int_shift, d->int_width);
70 if (d->frac_width)
71 rate += ls1x_pll_rate_part(val, d->frac_shift, d->frac_width);
72 rate *= parent_rate;
73 rate >>= d->shift;
74
75 return rate;
76 }
77
/* PLL clocks are read-only: only rate recalculation is provided */
static const struct clk_ops ls1x_pll_clk_ops = {
	.recalc_rate = ls1x_pll_recalc_rate,
};
81
ls1x_divider_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)82 static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
83 unsigned long parent_rate)
84 {
85 struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
86 const struct ls1x_clk_div_data *d = ls1x_clk->data;
87 unsigned int val;
88
89 val = readl(ls1x_clk->reg) >> d->shift;
90 val &= clk_div_mask(d->width);
91
92 return divider_recalc_rate(hw, parent_rate, val, d->table,
93 d->flags, d->width);
94 }
95
ls1x_divider_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)96 static int ls1x_divider_determine_rate(struct clk_hw *hw,
97 struct clk_rate_request *req)
98 {
99 struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
100 const struct ls1x_clk_div_data *d = ls1x_clk->data;
101
102 req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
103 d->table, d->width, d->flags);
104
105 return 0;
106 }
107
/*
 * ls1x_divider_set_rate() - program a new divider value
 * @hw: clk_hw of the divider clock
 * @rate: requested output rate
 * @parent_rate: current parent rate
 *
 * The divider field must not change while the clock is in use, so the
 * sequence is: switch the clock to bypass, write the new divider value,
 * then release bypass.  The whole read-modify-write sequence is done
 * under the shared divider register lock with interrupts disabled,
 * since multiple dividers live in the same register.
 *
 * Returns 0 on success or the negative error from divider_get_val().
 */
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
	const struct ls1x_clk_div_data *d = ls1x_clk->data;
	int val, div_val;
	unsigned long flags = 0;

	/* Translate the requested rate into a register field value */
	div_val = divider_get_val(rate, parent_rate, d->table,
				  d->width, d->flags);
	if (div_val < 0)
		return div_val;

	spin_lock_irqsave(d->lock, flags);

	/* Bypass the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val &= ~BIT(d->bypass_shift);
	else
		val |= BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	/* Update the divider field while the clock is bypassed */
	val = readl(ls1x_clk->reg);
	val &= ~(clk_div_mask(d->width) << d->shift);
	val |= (u32)div_val << d->shift;
	writel(val, ls1x_clk->reg);

	/* Restore the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val |= BIT(d->bypass_shift);
	else
		val &= ~BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	spin_unlock_irqrestore(d->lock, flags);

	return 0;
}
148
/* Divider clocks support rate query, rounding and programming */
static const struct clk_ops ls1x_clk_divider_ops = {
	.recalc_rate = ls1x_divider_recalc_rate,
	.determine_rate = ls1x_divider_determine_rate,
	.set_rate = ls1x_divider_set_rate,
};
154
/*
 * LS1X_CLK_PLL() - declare a read-only PLL clock fed by the "xtal" input.
 * f_shift/f_width and i_shift/i_width locate the fractional and integer
 * multiplier fields in the register at @_offset; @_fixed is added to the
 * multiplier and @_shift right-shifts the resulting rate.
 */
#define LS1X_CLK_PLL(_name, _offset, _fixed, _shift,			\
		     f_shift, f_width, i_shift, i_width)		\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_pll_data) {			\
		.fixed = (_fixed),					\
		.shift = (_shift),					\
		.int_shift = (i_shift),					\
		.int_width = (i_width),					\
		.frac_shift = (f_shift),				\
		.frac_width = (f_width),				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_pll_clk_ops,				\
		.parent_data = &(const struct clk_parent_data) {	\
			.fw_name = "xtal",				\
			.name = "xtal",					\
			.index = -1,					\
		},							\
		.num_parents = 1,					\
	},								\
}
178
/*
 * LS1X_CLK_DIV() - declare a divider clock with parent @_pname.
 * The divider field lives at @_shift/@_width in the register at
 * @_offset, with a bypass bit at @_bypass_shift (active-low when
 * @_bypass_inv is set).  CLK_GET_RATE_NOCACHE keeps the framework
 * re-reading the hardware, since firmware may change these registers.
 */
#define LS1X_CLK_DIV(_name, _pname, _offset, _shift, _width,		\
		     _table, _bypass_shift, _bypass_inv, _flags)	\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_div_data){			\
		.shift = (_shift),					\
		.width = (_width),					\
		.table = (_table),					\
		.flags = (_flags),					\
		.bypass_shift = (_bypass_shift),			\
		.bypass_inv = (_bypass_inv),				\
		.lock = &ls1x_clk_div_lock,				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_clk_divider_ops,				\
		.parent_hws = (const struct clk_hw *[]) { _pname },	\
		.num_parents = 1,					\
		.flags = CLK_GET_RATE_NOCACHE,				\
	},								\
}
200
/*
 * LS1B clock tree: PLL from the crystal; CPU, DC and AHB dividers fed
 * by the PLL; APB fixed at half the AHB rate.
 */
static LS1X_CLK_PLL(ls1b_clk_pll, CLK_PLL_FREQ, 12, 1, 0, 5, 0, 0);
static LS1X_CLK_DIV(ls1b_clk_cpu, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    20, 4, NULL, 8, 0,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
static LS1X_CLK_DIV(ls1b_clk_dc, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    26, 4, NULL, 12, 0, CLK_DIVIDER_ONE_BASED);
static LS1X_CLK_DIV(ls1b_clk_ahb, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    14, 4, NULL, 10, 0, CLK_DIVIDER_ONE_BASED);
static CLK_FIXED_FACTOR(ls1b_clk_apb, "ls1b_clk_apb", "ls1b_clk_ahb", 2, 1,
			CLK_SET_RATE_PARENT);
211
/* LS1B provider table, indexed by the LS1X_CLKID_* binding constants */
static struct clk_hw_onecell_data ls1b_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1b_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1b_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1b_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1b_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1b_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
222
/*
 * LS1C AHB divider encoding (non-linear).
 * NOTE(review): values 2 and 3 both map to div 3 — presumably mirrors
 * the hardware encoding; confirm against the LS1C datasheet.
 */
static const struct clk_div_table ls1c_ahb_div_table[] = {
	[0] = { .val = 0, .div = 2 },
	[1] = { .val = 1, .div = 4 },
	[2] = { .val = 2, .div = 3 },
	[3] = { .val = 3, .div = 3 },
	[4] = { /* sentinel */ }
};
230
/*
 * LS1C clock tree: PLL from the crystal; CPU and DC dividers fed by the
 * PLL; AHB divided down from the CPU clock via the table above; APB
 * equal to AHB (1:1 fixed factor).
 */
static LS1X_CLK_PLL(ls1c_clk_pll, CLK_PLL_FREQ, 0, 2, 8, 8, 16, 8);
static LS1X_CLK_DIV(ls1c_clk_cpu, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    8, 7, NULL, 0, 1,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
static LS1X_CLK_DIV(ls1c_clk_dc, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    24, 7, NULL, 4, 1, CLK_DIVIDER_ONE_BASED);
static LS1X_CLK_DIV(ls1c_clk_ahb, &ls1c_clk_cpu.hw, CLK_PLL_FREQ,
		    0, 2, ls1c_ahb_div_table, 0, 0, CLK_DIVIDER_ALLOW_ZERO);
static CLK_FIXED_FACTOR(ls1c_clk_apb, "ls1c_clk_apb", "ls1c_clk_ahb", 1, 1,
			CLK_SET_RATE_PARENT);
241
/* LS1C provider table, indexed by the LS1X_CLKID_* binding constants */
static struct clk_hw_onecell_data ls1c_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1c_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1c_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1c_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1c_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1c_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
252
ls1x_clk_init(struct device_node * np,struct clk_hw_onecell_data * hw_data)253 static void __init ls1x_clk_init(struct device_node *np,
254 struct clk_hw_onecell_data *hw_data)
255 {
256 struct ls1x_clk *ls1x_clk;
257 void __iomem *reg;
258 int i, ret;
259
260 reg = of_iomap(np, 0);
261 if (!reg) {
262 pr_err("Unable to map base for %pOF\n", np);
263 return;
264 }
265
266 for (i = 0; i < hw_data->num; i++) {
267 /* array might be sparse */
268 if (!hw_data->hws[i])
269 continue;
270
271 if (i != LS1X_CLKID_APB) {
272 ls1x_clk = to_ls1x_clk(hw_data->hws[i]);
273 ls1x_clk->reg = reg + ls1x_clk->offset;
274 }
275
276 ret = of_clk_hw_register(np, hw_data->hws[i]);
277 if (ret)
278 goto err;
279 }
280
281 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
282 if (!ret)
283 return;
284
285 err:
286 pr_err("Failed to register %pOF\n", np);
287
288 while (--i >= 0)
289 clk_hw_unregister(hw_data->hws[i]);
290
291 iounmap(reg);
292 }
293
ls1b_clk_init(struct device_node * np)294 static void __init ls1b_clk_init(struct device_node *np)
295 {
296 return ls1x_clk_init(np, &ls1b_clk_hw_data);
297 }
298
ls1c_clk_init(struct device_node * np)299 static void __init ls1c_clk_init(struct device_node *np)
300 {
301 return ls1x_clk_init(np, &ls1c_clk_hw_data);
302 }
303
/* Register early via CLK_OF_DECLARE against the SoC compatible strings */
CLK_OF_DECLARE(ls1b_clk, "loongson,ls1b-clk", ls1b_clk_init);
CLK_OF_DECLARE(ls1c_clk, "loongson,ls1c-clk", ls1c_clk_init);
306