// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap4_dpll_regm4xen_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap4_dpll_regm4xen_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
	.save_context = &omap3_core_dpll_save_context,
	.restore_context = &omap3_core_dpll_restore_context,
};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
	.save_context = &omap3_noncore_dpll_save_context,
	.restore_context = &omap3_noncore_dpll_restore_context,
};

static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.save_context = &omap3_noncore_dpll_save_context,
	.restore_context = &omap3_noncore_dpll_restore_context
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap2_dpllcore_recalc,
	.determine_rate = &omap2_dpll_determine_rate,
	.set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.determine_rate = &omap2_dpll_determine_rate,
};

static const struct clk_ops omap3_dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll5_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};
#endif

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate = &omap3_clkoutx2_recalc,
};
#endif
/**
 * _register_dpll - low level registration of a DPLL clock
 * @user: pointer to the hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. If a failure occurs (the clk-ref
 * or clk-bypass clock is missing), the clock is added to the retry list and
 * its initialization is retried at a later stage.
 */
static void __init _register_dpll(void *user,
				  struct device_node *node)
{
	struct clk_hw *hw = user;
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	const char *name;
	struct clk *clk;
	const struct clk_init_data *init = hw->init;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %pOFn, retry later\n",
			 node);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);

	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %pOFn, retry later\n",
			 node);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);

	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init->parent_names);
		kfree(init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(init->parent_names);
	kfree(init);
	kfree(clk_hw);
}
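
/*
 * Note on the binding consumed above: _register_dpll() takes the DPLL
 * reference clock from "clocks" index 0 and the bypass clock from index 1.
 * A purely illustrative property (the clock names are assumptions, not
 * taken from any dts) would be:
 *
 *	clocks = <&sys_clkin_ck>, <&core_bypass_ck>;
 */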

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLLx2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x 2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk *clk;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *clk_hw;
	const char *name = ti_dt_clk_name(node);
	const char *parent_name;

	parent_name = of_clk_get_parent_name(node, 0);
	if (!parent_name) {
		pr_err("%pOFn must have parent\n", node);
		return;
	}

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->ops = hw_ops;
	clk_hw->hw.init = &init;

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
	if (hw_ops == &clkhwops_omap4_dpllmx) {
		int ret;

		/* Check if register defined, if not, drop hw-ops */
		ret = of_property_count_elems_of_size(node, "reg", 1);
		if (ret <= 0) {
			clk_hw->ops = NULL;
		} else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) {
			kfree(clk_hw);
			return;
		}
	}
#endif

	/* register the clock */
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);

	if (IS_ERR(clk))
		kfree(clk_hw);
	else
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
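
/*
 * Illustrative x2 clock node as handled by _register_dpll_x2() (node and
 * clock names are assumptions, not copied from a dts). Exactly one parent
 * is required; a "reg" entry, when present, is used as clksel_reg for the
 * omap4 dpllmx hw-ops, and those hw-ops are dropped when "reg" is absent:
 *
 *	dpll_core_x2_ck: clock {
 *		compatible = "ti,omap4-dpll-x2-clock";
 *		#clock-cells = <0>;
 *		clocks = <&dpll_core_ck>;
 *	};
 */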
#endif

/**
 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
 * @node: device node containing the DPLL info
 * @ops: ops for the DPLL
 * @ddt: DPLL data template to use
 *
 * Initializes a DPLL clock from device tree data.
 */
static void __init of_ti_dpll_setup(struct device_node *node,
				    const struct clk_ops *ops,
				    const struct dpll_data *ddt)
{
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	struct dpll_data *dd = NULL;
	int ssc_clk_index;
	u8 dpll_mode = 0;
	u32 min_div;

	dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!dd || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = init;

	init->name = ti_dt_clk_name(node);
	init->ops = ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (!init->num_parents) {
		pr_err("%pOFn must have parent(s)\n", node);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
		goto cleanup;

	/*
	 * Special case for OMAP2 DPLL, register order is different due to
	 * missing idlest_reg, also clkhwops is different. Detected from
	 * missing idlest_mask.
	 */
	if (!dd->idlest_mask) {
		if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
			goto cleanup;
#ifdef CONFIG_ARCH_OMAP2
		clk_hw->ops = &clkhwops_omap2xxx_dpll;
		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
	} else {
		if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
			goto cleanup;

		if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
			goto cleanup;
	}

	if (dd->autoidle_mask) {
		if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
			goto cleanup;

		ssc_clk_index = 4;
	} else {
		ssc_clk_index = 3;
	}

	if (dd->ssc_deltam_int_mask && dd->ssc_deltam_frac_mask &&
	    dd->ssc_modfreq_mant_mask && dd->ssc_modfreq_exp_mask) {
		if (ti_clk_get_reg_addr(node, ssc_clk_index++,
					&dd->ssc_deltam_reg))
			goto cleanup;

		if (ti_clk_get_reg_addr(node, ssc_clk_index++,
					&dd->ssc_modfreq_reg))
			goto cleanup;

		of_property_read_u32(node, "ti,ssc-modfreq-hz",
				     &dd->ssc_modfreq);
		of_property_read_u32(node, "ti,ssc-deltam", &dd->ssc_deltam);
		dd->ssc_downspread =
			of_property_read_bool(node, "ti,ssc-downspread");
	}

	if (of_property_read_bool(node, "ti,low-power-stop"))
		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

	if (of_property_read_bool(node, "ti,low-power-bypass"))
		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

	if (of_property_read_bool(node, "ti,lock"))
		dpll_mode |= 1 << DPLL_LOCKED;

	if (!of_property_read_u32(node, "ti,min-div", &min_div) &&
	    min_div > dd->min_divider)
		dd->min_divider = min_div;

	if (dpll_mode)
		dd->modes = dpll_mode;

	_register_dpll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(dd);
	kfree(parent_names);
	kfree(init);
	kfree(clk_hw);
}
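
/*
 * For reference, of_ti_dpll_setup() maps the "reg" entries in order: the
 * control register at index 0, then idlest and mult_div1 (on OMAP2, where
 * idlest_mask is zero, mult_div1 comes directly at index 1), optionally
 * followed by autoidle and the two SSC registers. A minimal, purely
 * illustrative node (names and offsets are assumptions, not copied from a
 * dts) could look like:
 *
 *	dpll_example_ck: clock@1a0 {
 *		compatible = "ti,omap4-dpll-clock";
 *		#clock-cells = <0>;
 *		clocks = <&sys_clkin_ck>, <&bypass_clkin_ck>;
 *		reg = <0x1a0>, <0x1a4>, <0x1a8>, <0x1ac>;
 *	};
 */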

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	if ((of_machine_is_compatible("ti,omap3630") ||
	     of_machine_is_compatible("ti,omap36xx")) &&
	    of_node_name_eq(node, "dpll5_ck"))
		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
	else
		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif

static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif

static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.ssc_enable_mask = 0x1 << 12,
		.ssc_downspread_mask = 0x1 << 14,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.ssc_deltam_int_mask = 0x3 << 18,
		.ssc_deltam_frac_mask = 0x3ffff,
		.ssc_modfreq_mant_mask = 0x7f,
		.ssc_modfreq_exp_mask = 0x7 << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.max_rate = 2000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 2000000000,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.ssc_enable_mask = 0x1 << 12,
		.ssc_downspread_mask = 0x1 << 14,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.ssc_deltam_int_mask = 0x3 << 18,
		.ssc_deltam_frac_mask = 0x3ffff,
		.ssc_modfreq_mant_mask = 0x7f,
		.ssc_modfreq_exp_mask = 0x7 << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);