// SPDX-License-Identifier: GPL-2.0-only
/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>	/* of_device_get_match_data() */
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL			0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS			0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS			0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON			0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1
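
/*
 * CREG_CORE_IF_CLK_DIV_* are the values written to the CREG core interface
 * clock divider register mapped as spec_regs (the second memory region of the
 * core PLL node). They select div-by-1 or div-by-2 for the core interface
 * clock, depending on whether the core clock is above the 500 MHz threshold;
 * see hsdk_pll_core_update_rate().
 */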

struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
	u32 bypass;
};

static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0, 0 },
	{ 133000000,  0, 15, 3, 0, 0 },
	{ 200000000,  1, 47, 3, 0, 0 },
	{ 233000000,  1, 27, 2, 0, 0 },
	{ 300000000,  1, 35, 2, 0, 0 },
	{ 333000000,  1, 39, 2, 0, 0 },
	{ 400000000,  1, 47, 2, 0, 0 },
	{ 500000000,  0, 14, 1, 0, 0 },
	{ 600000000,  0, 17, 1, 0, 0 },
	{ 700000000,  0, 20, 1, 0, 0 },
	{ 800000000,  0, 23, 1, 0, 0 },
	{ 900000000,  1, 26, 0, 0, 0 },
	{ 1000000000, 1, 29, 0, 0, 0 },
	{ 1100000000, 1, 32, 0, 0, 0 },
	{ 1200000000, 1, 35, 0, 0, 0 },
	{ 1300000000, 1, 38, 0, 0, 0 },
	{ 1400000000, 1, 41, 0, 0, 0 },
	{ 1500000000, 1, 44, 0, 0, 0 },
	{ 1600000000, 1, 47, 0, 0, 0 },
	{}
};
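
/*
 * Worked example, assuming the ~33.33 MHz reference clock that feeds these
 * PLLs on the HSDK board (the reference rate is not encoded here; it comes
 * from the parent clock in the device tree): the 1 GHz entry above,
 * idiv = 1, fbdiv = 29, odiv = 0, decodes as in hsdk_pll_recalc_rate() to
 * 33.33 MHz * 2 * (29 + 1) / ((1 + 1) * 2^0) ~= 1000 MHz.
 */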

static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 27000000,  0, 0,  0, 0, 1 },
	{ 148500000, 0, 21, 3, 0, 0 },
	{ 297000000, 0, 21, 2, 0, 0 },
	{ 540000000, 0, 19, 1, 0, 0 },
	{ 594000000, 0, 21, 1, 0, 0 },
	{}
};
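
/*
 * The 27 MHz entry simply bypasses the PLL, which suggests a 27 MHz reference
 * for the HDMI PLL. Under that assumption the 148.5 MHz entry decodes to
 * 27 MHz * 2 * (21 + 1) / ((0 + 1) * 2^3) = 148.5 MHz.
 */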

struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};
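
/*
 * Both update_rate callbacks program the PLL the same way; the core variant
 * additionally switches the core interface clock divider (via spec_regs)
 * around the rate change, see hsdk_pll_core_update_rate().
 */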

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	if (cfg->bypass) {
		val = hsdk_pll_read(clk, CGU_PLL_CTRL);
		val |= CGU_PLL_CTRL_BYPASS;
	} else {
		/* Powerdown and Bypass bits should be cleared */
		val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
		val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
		val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
		val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
	}

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock and check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When the core clock exceeds 500 MHz, the divider for the interface
	 * clock must be programmed to div-by-2. Do this before raising the
	 * PLL rate so the interface clock never runs undivided above the
	 * threshold.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock and check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Switch the divider back to div-by-1 only after the core clock has
	 * successfully been set at or below the 500 MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
		parent_rate);

	return -EINVAL;
}

static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};
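
/*
 * Consumer-side sketch (illustrative only, not part of this driver): the
 * common clock framework invokes the ops above, so a typical user would do
 * something like
 *
 *	clk = devm_clk_get(dev, NULL);
 *	ret = clk_set_rate(clk, 1000000000);
 *
 * hsdk_pll_set_rate() only accepts rates that exactly match a table entry;
 * the framework rounds the request via .round_rate before calling it.
 */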

static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	pll_clk->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "invalid number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
					   &pll_clk->hw);
}

static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll special registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unregister_clk;
	}

	return;

err_unregister_clk:
	/* Don't leave a registered clk_hw pointing at memory we free below */
	clk_hw_unregister(&pll_clk->hw);
err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

/* The core PLL is needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);
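
/*
 * Illustrative device tree fragment for the early core PLL (node name,
 * addresses and the input clock label are placeholders, not taken from a real
 * HSDK .dts). The second "reg" region is the CREG core interface clock
 * divider mapped as spec_regs above:
 *
 *	core_clk: core-clk@0 {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <0x0 0x10>, <0x100 0x4>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 */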

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata },
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata },
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
};
builtin_platform_driver(hsdk_pll_clk_driver);