// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP APLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * J Keerthy <j-keerthy@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>

#include "clock.h"

#define APLL_FORCE_LOCK 0x1
#define APLL_AUTO_IDLE 0x2
#define MAX_APLL_WAIT_TRIES 1000000

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

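/*
 * Request FORCE_LOCK mode for the DRA7 APLL and poll the idle-status
 * register until it reports the locked state or the wait limit expires.
 */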
static int dra7_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r = 0, i = 0;
	struct dpll_data *ad;
	const char *clk_name;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;
	if (!ad)
		return -EINVAL;

	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(ad->idlest_mask);

	/* Check if the APLL is already locked */
	v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);

	if ((v & ad->idlest_mask) == state)
		return r;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	state <<= __ffs(ad->idlest_mask);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if ((v & ad->idlest_mask) == state)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("clock: %s failed transition to '%s'\n",
			clk_name, (state) ? "locked" : "bypassed");
		r = -EBUSY;
	} else
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

	return r;
}

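/* Drop the DRA7 APLL back into auto-idle (low-power) mode. */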
static void dra7_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;

	state <<= __ffs(ad->idlest_mask);

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

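/* The APLL counts as enabled unless its enable field reads back AUTO_IDLE. */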
static int dra7_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u32 v;

	ad = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == APLL_AUTO_IDLE ? 0 : 1;
}

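/* The reference clock (parent index 0) is always reported as the parent. */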
static u8 dra7_init_apll_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops apll_ck_ops = {
	.enable = &dra7_apll_enable,
	.disable = &dra7_apll_disable,
	.is_enabled = &dra7_apll_is_enabled,
	.get_parent = &dra7_init_apll_parent,
};

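/*
 * Look up the reference and bypass parents from the DT node and register
 * the APLL. If either parent clock is not ready yet, registration is
 * deferred via ti_clk_retry_init().
 */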
static void __init omap_clk_register_apll(void *user,
					   struct device_node *node)
{
	struct clk_hw *hw = user;
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	const char *name;
	struct clk *clk;
	const struct clk_init_data *init = clk_hw->hw.init;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref for %pOFn not ready, retry\n",
			 node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass for %pOFn not ready, retry\n",
			 node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_bypass = __clk_get_hw(clk);

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init->parent_names);
		kfree(init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(init->parent_names);
	kfree(init);
	kfree(clk_hw);
}

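/*
 * Parse a "ti,dra7-apll-clock" DT node: allocate the clock data, read the
 * parent clocks and the control/idlest register addresses, then hand off
 * to omap_clk_register_apll() for registration.
 */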
static void __init of_dra7_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;

	init->name = ti_dt_clk_name(node);
	init->ops = &apll_ck_ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("dra7 apll %pOFn must have parent(s)\n", node);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	ad->idlest_mask = 0x1;
	ad->enable_mask = 0x3;

	omap_clk_register_apll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(parent_names);
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);

#define OMAP2_EN_APLL_LOCKED 0x3
#define OMAP2_EN_APLL_STOPPED 0x0

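/* The OMAP2 APLL is enabled when its enable field is programmed to "locked". */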
static int omap2_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
}

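/* OMAP2 APLLs run at a fixed rate, reported only while the PLL is locked. */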
static unsigned long omap2_apll_recalc(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	if (omap2_apll_is_enabled(hw))
		return clk->fixed_rate;

	return 0;
}

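/*
 * Lock the OMAP2 APLL and poll its idle-status bit until the PLL reports
 * locked or the wait limit expires.
 */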
static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			clk_hw_get_name(&clk->hw));
		return -EBUSY;
	}

	return 0;
}

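/* Stop the OMAP2 APLL by programming the "stopped" value into its enable field. */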
static void omap2_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

static const struct clk_ops omap2_apll_ops = {
	.enable = &omap2_apll_enable,
	.disable = &omap2_apll_disable,
	.is_enabled = &omap2_apll_is_enabled,
	.recalc_rate = &omap2_apll_recalc,
};

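/* Update the APLL autoidle field with the requested idle mode. */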
static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
{
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg);
	v &= ~ad->autoidle_mask;
	v |= val << __ffs(ad->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->autoidle_reg);
}

#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0

static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
}

static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
}

static const struct clk_hw_omap_ops omap2_apll_hwops = {
	.allow_idle = &omap2_apll_allow_idle,
	.deny_idle = &omap2_apll_deny_idle,
};

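/*
 * Parse a "ti,omap2-apll-clock" DT node: read the fixed rate, the enable
 * and idlest bit positions and the register addresses, then register the
 * clock with the common clock framework.
 */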
static void __init of_omap2_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char *name;
	struct clk *clk;
	const char *parent_name;
	u32 val;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);

	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	init->ops = &omap2_apll_ops;
	name = ti_dt_clk_name(node);
	init->name = name;
	clk_hw->ops = &omap2_apll_hwops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents != 1) {
		pr_err("%pOFn must have one parent\n", node);
		goto cleanup;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init->parent_names = &parent_name;

	if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
		pr_err("%pOFn missing clock-frequency\n", node);
		goto cleanup;
	}
	clk_hw->fixed_rate = val;

	clk_hw->enable_bit = ti_clk_get_legacy_bit_shift(node);
	ad->enable_mask = 0x3 << clk_hw->enable_bit;
	ad->autoidle_mask = 0x3 << clk_hw->enable_bit;

	if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
		pr_err("%pOFn missing idlest-shift\n", node);
		goto cleanup;
	}

	ad->idlest_mask = 1 << val;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
	ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
		return;
	}
cleanup:
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
	       of_omap2_apll_setup);