// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define OMAP4_STBYST_MASK		BIT(18)
#define OMAP4_STBYST_SHIFT		18

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

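/*
 * Timeout bookkeeping for the IDLEST polling loops below: a plain cycle
 * counter is used while ktime is unusable (early boot, timekeeping
 * suspended), a ktime_t start stamp otherwise. See _omap4_is_timeout().
 */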
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

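/*
 * Helpers for decoding the IDLEST field (bits 17:16) of a CM_*_CLKCTRL
 * register: "ready" means the module is functional or interface-idle,
 * "idle" means it is fully disabled.
 */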
static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. First one is during early boot
	 * when the timers haven't been initialized yet. The second
	 * one is during suspend-resume cycle while timekeeping is
	 * being suspended / resumed. Clocksource for the system
	 * can be from a timer that requires pm_runtime access, which
	 * will eventually bring us here with timekeeping_suspended,
	 * during both suspend entry and resume paths. This happens
	 * at least on am43xx platform. Account for flakiness
	 * with udelay() by multiplying the timeout value by 2.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1 * 2);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

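/*
 * Enable a clkctrl module clock: wake up its clockdomain (if any), program
 * the requested MODULEMODE into the CM_*_CLKCTRL register and, unless the
 * module has no IDLEST field, poll until the module reports ready.
 */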
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

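/*
 * Clock specifier translation for the provider. Consumers reference a
 * clkctrl clock with two cells, the CM_*_CLKCTRL register offset and the
 * bit offset within it, for example (illustrative DT snippet, macro name
 * assumed from the dt-bindings clock headers):
 *
 *	clocks = <&l4_per_clkctrl OMAP4_UART3_CLKCTRL 0>;
 *
 * The lookup below simply matches those two cells against the clocks
 * registered for this provider.
 */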
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry = NULL, *iter;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(iter, &provider->clocks, node) {
		if (iter->reg_offset == clkspec->args[0] &&
		    iter->bit_offset == clkspec->args[1]) {
			entry = iter;
			break;
		}
	}

	if (!entry)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

/* Get clkctrl clock base name based on clkctrl_name or dts node */
static const char * __init clkctrl_get_clock_name(struct device_node *np,
						  const char *clkctrl_name,
						  int offset, int index,
						  bool legacy_naming)
{
	char *clock_name;

	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
	if (clkctrl_name && !legacy_naming) {
		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
				       clkctrl_name, offset, index);
		if (!clock_name)
			return NULL;

		strreplace(clock_name, '_', '-');

		return clock_name;
	}

	/* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
	if (clkctrl_name)
		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
				 clkctrl_name, offset, index);

	/* l4per_cm:clk:1234:0 old style naming based on parent node name */
	if (legacy_naming)
		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
				 np->parent, offset, index);

	/* l4per-clkctrl:1234:0 style naming based on node name */
	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
}

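/*
 * Common registration path for the gate/mux/divider sub-clocks: generate
 * the clock name, register the clk_hw and add a lookup entry so that
 * _ti_omap4_clkctrl_xlate() can find it by register and bit offset.
 */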
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops,
			 const char *clkctrl_name)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
					   ti_clk_get_features()->flags &
					   TI_CLK_CLKCTRL_COMPAT);

	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = of_ti_clk_register(node, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

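/*
 * The three setup helpers below create the optional sub-clocks (gate,
 * mux, divider) that live in the same CM_*_CLKCTRL register as the main
 * module clock.
 */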
static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg, const char *clkctrl_name)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops, clkctrl_name))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

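	/*
	 * Build a bitfield mask wide enough for all parent selector values;
	 * with CLK_MUX_INDEX_ONE the selectors start from 1, so one extra
	 * value must fit.
	 */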
	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops, clkctrl_name))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      div)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops, clkctrl_name))
		kfree(div);
}

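/*
 * Walk the per-register bit data and instantiate each described
 * sub-clock; the list is terminated by an entry with a zero bit offset.
 */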
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg, const char *clkctrl_name)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg, clkctrl_name);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

/*
 * Get clock name based on "clock-output-names" property or the
 * compatible property for clkctrl.
 */
static const char * __init clkctrl_get_name(struct device_node *np)
{
	struct property *prop;
	const int prefix_len = 11;
	const char *compat;
	const char *output;
	const char *end;
	char *name;

	if (!of_property_read_string_index(np, "clock-output-names", 0,
					   &output)) {
		int len;

		len = strlen(output);
		end = strstr(output, "_clkctrl");
		if (end)
			len -= strlen(end);
		name = kstrndup(output, len, GFP_KERNEL);

		return name;
	}

	of_property_for_each_string(np, "compatible", prop, compat) {
		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
			end = compat + prefix_len;
			/* Two letter minimum name length for l3, l4 etc */
			if (strnlen(end, 16) < 2)
				continue;
			name = kstrdup_and_replace(end, '-', '_', GFP_KERNEL);
			if (!name)
				continue;

			return name;
		}
	}

	return NULL;
}

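/*
 * Main setup for a "ti,clkctrl" node: select the SoC specific clkctrl
 * data, derive the clockdomain name, register one main module clock per
 * CM_*_CLKCTRL register (plus its sub-clocks) and finally register the
 * node as a clock provider.
 */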
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk = NULL;
	bool legacy_naming;
	const char *clkctrl_name;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;
	struct resource res;

	of_address_to_resource(node, 0, &res);
	addr = (u32)res.start;

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7"))
		data = dra7_clkctrl_data;
	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx"))
		data = am3_clkctrl_data;
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372"))
		data = am4_clkctrl_data;

	if (of_machine_is_compatible("ti,am438x"))
		data = am438x_clkctrl_data;
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

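	/* Find the clkctrl data block matching this node's register address */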
	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
	clkctrl_name = clkctrl_get_name(node);
	if (clkctrl_name) {
		provider->clkdm_name = kasprintf(GFP_KERNEL,
						 "%s_clkdm", clkctrl_name);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}
		goto clkdm_found;
	}

	/*
	 * The code below can be removed when all clkctrl nodes use domain
	 * specific compatible property and standard clock node naming
	 */
	if (legacy_naming) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash from the clkdm name with underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}
clkdm_found:
	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr, clkctrl_name);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			set_bit(NO_IDLEST, &hw->flags);

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;

		init.name = clkctrl_get_clock_name(node, clkctrl_name,
						   reg_data->offset, 0,
						   legacy_naming);
		if (!init.name)
			goto cleanup;

		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	kfree(clkctrl_name);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);

/**
 * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
 * @clk: clock to check standby status for
 *
 * Finds whether the provided clock is in standby mode or not. Returns
 * true if the provided clock is a clkctrl type clock and it is in standby,
 * false otherwise.
 */
bool ti_clk_is_in_standby(struct clk *clk)
{
	struct clk_hw *hw;
	struct clk_hw_omap *hwclk;
	u32 val;

	hw = __clk_get_hw(clk);

	if (!omap2_clk_is_hw_omap(hw))
		return false;

	hwclk = to_clk_hw_omap(hw);

	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);

	if (val & OMAP4_STBYST_MASK)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);